"csrc/quantization/marlin/marlin.cuh" did not exist on "2ca8867f0322aac5927d6b6741619ec36349c7ac"
lgb.cv.R 19.2 KB
Newer Older
James Lamb's avatar
James Lamb committed
1
2
#' @importFrom R6 R6Class
# Container for the per-fold boosters produced by lgb.cv().
#
# Fields:
#   best_iter    - best iteration found across folds (-1L until computed)
#   best_score   - score at best_iter (NA until computed)
#   record_evals - per-iteration evaluation history (filled by
#                  cb.record.evaluation when record = TRUE)
#   boosters     - list with one element per CV fold; each element is a
#                  list of the form list(booster = <Booster>) (see how
#                  lgb.cv() builds `bst_folds`)
CVBooster <- R6::R6Class(
  classname = "lgb.CVBooster",
  cloneable = FALSE,
  public = list(
    best_iter = -1L,
    best_score = NA,
    record_evals = list(),
    boosters = list(),

    # Store the list of per-fold booster wrappers.
    initialize = function(x) {
      self$boosters <- x
    },

    # Push new parameters down to every fold's booster.
    # BUG FIX: the original looped over `boosters`, which is not in scope
    # inside an R6 method (the field must be accessed as `self$boosters`),
    # and called reset_parameter() on the list wrapper instead of on the
    # Booster stored under the `booster` key.
    reset_parameter = function(new_params) {
      for (x in self$boosters) {
        x$booster$reset_parameter(new_params)
      }
      self
    }
  )
)

20
#' @name lgb.cv
James Lamb's avatar
James Lamb committed
21
#' @title Main CV logic for LightGBM
James Lamb's avatar
James Lamb committed
22
23
#' @description Cross validation logic used by LightGBM
#' @inheritParams lgb_shared_params
24
#' @param nfold the original dataset is randomly partitioned into \code{nfold} equal size subsamples.
25
#' @param label Vector of labels, used if \code{data} is not an \code{\link{lgb.Dataset}}
26
#' @param weight vector of sample weights. If not NULL, will be set on the dataset
27
#' @param record Boolean, TRUE will record iteration message to \code{booster$record_evals}
Guolin Ke's avatar
Guolin Ke committed
28
#' @param showsd \code{boolean}, whether to show standard deviation of cross validation
29
#' @param stratified a \code{boolean} indicating whether sampling of folds should be stratified
30
#'                   by the values of outcome labels.
Guolin Ke's avatar
Guolin Ke committed
31
#' @param folds \code{list} provides a possibility to use a list of pre-defined CV folds
32
33
#'              (each element must be a vector of test fold's indices). When folds are supplied,
#'              the \code{nfold} and \code{stratified} parameters are ignored.
Guolin Ke's avatar
Guolin Ke committed
34
#' @param colnames feature names, if not null, will use this to overwrite the names in dataset
35
36
37
#' @param categorical_feature categorical features. This can either be a character vector of feature
#'                            names or an integer vector with the indices of the features (e.g.
#'                            \code{c(1L, 10L)} to say "the first and tenth columns").
38
39
40
#' @param callbacks List of callback functions that are applied at each iteration.
#' @param reset_data Boolean, setting it to TRUE (not the default value) will transform the booster model
#'                   into a predictor model which frees up memory and the original datasets
James Lamb's avatar
James Lamb committed
41
42
#' @param ... other parameters, see Parameters.rst for more information. A few key parameters:
#'            \itemize{
43
44
45
#'                \item{\code{boosting}: Boosting type. \code{"gbdt"}, \code{"rf"}, \code{"dart"} or \code{"goss"}.}
#'                \item{\code{num_leaves}: Maximum number of leaves in one tree.}
#'                \item{\code{max_depth}: Limit the max depth for tree model. This is used to deal with
James Lamb's avatar
James Lamb committed
46
#'                                 overfit when #data is small. Tree still grows leaf-wise.}
47
#'                \item{\code{num_threads}: Number of threads for LightGBM. For the best speed, set this to
48
#'                                   the number of real CPU cores, not the number of threads (most
James Lamb's avatar
James Lamb committed
49
50
#'                                   CPUs use hyper-threading to generate 2 threads per CPU core).}
#'            }
51
#' @inheritSection lgb_shared_params Early Stopping
52
#' @return a trained model \code{lgb.CVBooster}.
53
#'
Guolin Ke's avatar
Guolin Ke committed
54
#' @examples
55
#' \dontrun{
56
57
58
59
#' data(agaricus.train, package = "lightgbm")
#' train <- agaricus.train
#' dtrain <- lgb.Dataset(train$data, label = train$label)
#' params <- list(objective = "regression", metric = "l2")
60
61
62
#' model <- lgb.cv(
#'   params = params
#'   , data = dtrain
63
#'   , nrounds = 5L
64
65
66
#'   , nfold = 3L
#'   , min_data = 1L
#'   , learning_rate = 1.0
67
#' )
68
#' }
69
#' @importFrom data.table data.table setorderv
Guolin Ke's avatar
Guolin Ke committed
70
#' @export
71
72
73
74
75
76
77
78
79
80
81
lgb.cv <- function(params = list()
                   , data
                   , nrounds = 10L
                   , nfold = 3L
                   , label = NULL
                   , weight = NULL
                   , obj = NULL
                   , eval = NULL
                   , verbose = 1L
                   , record = TRUE
                   , eval_freq = 1L
                   , showsd = TRUE
                   , stratified = TRUE
                   , folds = NULL
                   , init_model = NULL
                   , colnames = NULL
                   , categorical_feature = NULL
                   , early_stopping_rounds = NULL
                   , callbacks = list()
                   , reset_data = FALSE
                   , ...
                   ) {

  # Must train for at least one round
  if (nrounds <= 0L) {
    stop("nrounds should be greater than zero")
  }

  # If 'data' is not an lgb.Dataset, try to construct one using 'label'
  if (!lgb.is.Dataset(data)) {
    if (is.null(label)) {
      stop("'label' must be provided for lgb.cv if 'data' is not an 'lgb.Dataset'")
    }
    data <- lgb.Dataset(data, label = label)
  }

  # Setup temporary variables: merge '...' into params and normalize
  # objective / metric entries (aliases resolved by the check helpers)
  params <- append(params, list(...))
  params$verbose <- verbose
  params <- lgb.check.obj(params, obj)
  params <- lgb.check.eval(params, eval)
  fobj <- NULL
  eval_functions <- list(NULL)

  # Check for objective (function or not): a custom objective function is
  # passed to the boosters as 'fobj', and the built-in objective is disabled
  if (is.function(params$objective)) {
    fobj <- params$objective
    params$objective <- "NONE"
  }

  # If eval is a single function, store it as a 1-element list
  # (for backwards compatibility). If it is a list of functions, store
  # all of them. This makes it possible to pass any mix of strings like "auc"
  # and custom functions to eval
  if (is.function(eval)) {
    eval_functions <- list(eval)
  }
  if (methods::is(eval, "list")) {
    eval_functions <- Filter(
      f = is.function
      , x = eval
    )
  }

  # Init predictor to empty
  predictor <- NULL

  # Check for boosting from a trained model: accept either a path to a
  # saved model (character) or an in-memory Booster
  if (is.character(init_model)) {
    predictor <- Predictor$new(init_model)
  } else if (lgb.is.Booster(init_model)) {
    predictor <- init_model$to_predictor()
  }

  # Set the iteration to start from / end to (and check for boosting from a trained model, again)
  begin_iteration <- 1L
  if (!is.null(predictor)) {
    begin_iteration <- predictor$current_iter() + 1L
  }
  # Check for number of rounds passed as parameter - in case there are multiple ones, take only the first one
  n_trees <- .PARAMETER_ALIASES()[["num_iterations"]]
  if (any(names(params) %in% n_trees)) {
    end_iteration <- begin_iteration + params[[which(names(params) %in% n_trees)[1L]]] - 1L
  } else {
    end_iteration <- begin_iteration + nrounds - 1L
  }

  # Check interaction constraints: feature names come from the 'colnames'
  # argument if given, otherwise from the Dataset itself
  cnames <- NULL
  if (!is.null(colnames)) {
    cnames <- colnames
  } else if (!is.null(data$get_colnames())) {
    cnames <- data$get_colnames()
  }
  params[["interaction_constraints"]] <- lgb.check_interaction_constraints(params, cnames)

  # Check for weights: attach them to the full Dataset before it is sliced
  # into folds (each fold inherits its rows' weights below)
  if (!is.null(weight)) {
    data$setinfo("weight", weight)
  }

  # Update parameters with parsed parameters
  data$update_params(params)

  # Create the predictor set (reaches into the R6 private environment to
  # attach the continued-training predictor, if any)
  data$.__enclos_env__$private$set_predictor(predictor)

  # Write column names
  if (!is.null(colnames)) {
    data$set_colnames(colnames)
  }

  # Write categorical features
  if (!is.null(categorical_feature)) {
    data$set_categorical_feature(categorical_feature)
  }

  # Construct datasets, if needed
  data$construct()

  # Check for folds: user-supplied folds override both 'nfold' and 'stratified'
  if (!is.null(folds)) {

    # Check for list of folds or for single value
    if (!identical(class(folds), "list") || length(folds) < 2L) {
      stop(sQuote("folds"), " must be a list with 2 or more elements that are vectors of indices for each CV-fold")
    }

    # Set number of folds
    nfold <- length(folds)

  } else {

    # Check fold value
    if (nfold <= 1L) {
      stop(sQuote("nfold"), " must be > 1")
    }

    # Create folds
    folds <- generate.cv.folds(
      nfold = nfold
      , nrows = nrow(data)
      , stratified = stratified
      , label = getinfo(data, "label")
      , group = getinfo(data, "group")
      , params = params
    )

  }

  # Add printing log callback
  if (verbose > 0L && eval_freq > 0L) {
    callbacks <- add.cb(callbacks, cb.print.evaluation(eval_freq))
  }

  # Add evaluation log callback (fills cv_booster$record_evals)
  if (record) {
    callbacks <- add.cb(callbacks, cb.record.evaluation())
  }

  # If early stopping was passed as a parameter in params(), prefer that to keyword argument
  # early_stopping_rounds by overwriting the value in 'early_stopping_rounds'
  early_stop <- .PARAMETER_ALIASES()[["early_stopping_round"]]
  early_stop_param_indx <- names(params) %in% early_stop
  if (any(early_stop_param_indx)) {
    first_early_stop_param <- which(early_stop_param_indx)[[1L]]
    first_early_stop_param_name <- names(params)[[first_early_stop_param]]
    early_stopping_rounds <- params[[first_early_stop_param_name]]
  }

  # Did user pass parameters that indicate they want to use early stopping?
  using_early_stopping_via_args <- !is.null(early_stopping_rounds) && early_stopping_rounds > 0L

  # Detect 'dart' boosting under any of its parameter aliases
  boosting_param_names <- .PARAMETER_ALIASES()[["boosting"]]
  using_dart <- any(
    sapply(
      X = boosting_param_names
      , FUN = function(param) {
        identical(params[[param]], "dart")
      }
    )
  )

  # Cannot use early stopping with 'dart' boosting
  if (using_dart) {
    warning("Early stopping is not available in 'dart' mode.")
    using_early_stopping_via_args <- FALSE

    # Remove the cb.early.stop() function if it was passed in to callbacks
    callbacks <- Filter(
      f = function(cb_func) {
        !identical(attr(cb_func, "name"), "cb.early.stop")
      }
      , x = callbacks
    )
  }

  # If user supplied early_stopping_rounds, add the early stopping callback
  if (using_early_stopping_via_args) {
    callbacks <- add.cb(
      callbacks
      , cb.early.stop(
        stopping_rounds = early_stopping_rounds
        , first_metric_only = isTRUE(params[["first_metric_only"]])
        , verbose = verbose
      )
    )
  }

  # Split callbacks into pre-iteration and post-iteration groups
  cb <- categorize.callbacks(callbacks)

  # Construct booster for each fold. The data.table() code below is used to
  # guarantee that indices are sorted while keeping init_score and weight together
  # with the correct indices. Note that it takes advantage of the fact that
  # someDT$some_column returns NULL if 'some_column' does not exist in the data.table
  bst_folds <- lapply(
    X = seq_along(folds)
    , FUN = function(k) {

      # For learning-to-rank, each fold is a named list with two elements:
      #   * `fold` = an integer vector of row indices
      #   * `group` = an integer vector describing which groups are in the fold
      # For classification or regression tasks, it will just be an integer
      # vector of row indices
      folds_have_group <- "group" %in% names(folds[[k]])
      if (folds_have_group) {
        test_indices <- folds[[k]]$fold
        test_group_indices <- folds[[k]]$group
        test_groups <- getinfo(data, "group")[test_group_indices]
        train_groups <- getinfo(data, "group")[-test_group_indices]
      } else {
        test_indices <- folds[[k]]
      }
      # training rows = everything not in this fold's test set
      train_indices <- seq_len(nrow(data))[-test_indices]

      # set up test set
      indexDT <- data.table::data.table(
        indices = test_indices
        , weight = getinfo(data, "weight")[test_indices]
        , init_score = getinfo(data, "init_score")[test_indices]
      )
      data.table::setorderv(indexDT, "indices", order = 1L)
      dtest <- slice(data, indexDT$indices)
      setinfo(dtest, "weight", indexDT$weight)
      setinfo(dtest, "init_score", indexDT$init_score)

      # set up training set
      indexDT <- data.table::data.table(
        indices = train_indices
        , weight = getinfo(data, "weight")[train_indices]
        , init_score = getinfo(data, "init_score")[train_indices]
      )
      data.table::setorderv(indexDT, "indices", order = 1L)
      dtrain <- slice(data, indexDT$indices)
      setinfo(dtrain, "weight", indexDT$weight)
      setinfo(dtrain, "init_score", indexDT$init_score)

      # group info only exists for learning-to-rank folds
      if (folds_have_group) {
        setinfo(dtest, "group", test_groups)
        setinfo(dtrain, "group", train_groups)
      }

      booster <- Booster$new(params, dtrain)
      booster$add_valid(dtest, "valid")
      return(
        list(booster = booster)
      )
    }
  )

  # Create new booster wrapping all per-fold boosters
  cv_booster <- CVBooster$new(bst_folds)

  # Callback env shared by all callbacks during training
  env <- CB_ENV$new()
  env$model <- cv_booster
  env$begin_iteration <- begin_iteration
  env$end_iteration <- end_iteration

  # Start training model using number of iterations to start and end with
  for (i in seq.int(from = begin_iteration, to = end_iteration)) {

    # Overwrite iteration in environment
    env$iteration <- i
    env$eval_list <- list()

    # Run pre-iteration callbacks
    for (f in cb$pre_iter) {
      f(env)
    }

    # Update one boosting iteration on every fold and collect each fold's
    # validation metrics (built-in metrics plus any custom eval functions)
    msg <- lapply(cv_booster$boosters, function(fd) {
      fd$booster$update(fobj = fobj)
      out <- list()
      for (eval_function in eval_functions) {
        out <- append(out, fd$booster$eval_valid(feval = eval_function))
      }
      return(out)
    })

    # Prepare collection of evaluation results (mean and SD across folds)
    merged_msg <- lgb.merge.cv.result(msg)

    # Write evaluation result in environment
    env$eval_list <- merged_msg$eval_list

    # Check for standard deviation requirement
    if (showsd) {
      env$eval_err_list <- merged_msg$eval_err_list
    }

    # Run post-iteration callbacks (printing, recording, early stopping)
    for (f in cb$post_iter) {
      f(env)
    }

    # Check for early stopping and break if needed
    if (env$met_early_stop) break

  }

  # When early stopping is not activated, we compute the best iteration / score ourselves
  # based on the first metric
  if (record && is.na(env$best_score)) {

    # when using a custom eval function, the metric name is returned from the
    # function, so figure it out from record_evals
    if (!is.null(eval_functions[1L])) {
      first_metric <- names(cv_booster$record_evals[["valid"]])[1L]
    } else {
      first_metric <- cv_booster$.__enclos_env__$private$eval_names[1L]
    }
    # direction of "best" depends on the metric (e.g. auc vs l2)
    .find_best <- which.min
    if (isTRUE(env$eval_list[[1L]]$higher_better[1L])) {
      .find_best <- which.max
    }
    cv_booster$best_iter <- unname(
      .find_best(
        unlist(
          cv_booster$record_evals[["valid"]][[first_metric]][[.EVAL_KEY()]]
        )
      )
    )
    cv_booster$best_score <- cv_booster$record_evals[["valid"]][[first_metric]][[.EVAL_KEY()]][[cv_booster$best_iter]]
  }

  # Optionally free the training data by round-tripping each booster through
  # a model string (converts it to a predictor-only model)
  if (reset_data) {
    lapply(cv_booster$boosters, function(fd) {
      # Store temporarily model data elsewhere
      booster_old <- list(
        best_iter = fd$booster$best_iter
        , best_score = fd$booster$best_score
        , record_evals = fd$booster$record_evals
      )
      # Reload model
      fd$booster <- lgb.load(model_str = fd$booster$save_model_to_string())
      fd$booster$best_iter <- booster_old$best_iter
      fd$booster$best_score <- booster_old$best_score
      fd$booster$record_evals <- booster_old$record_evals
    })
  }

  return(cv_booster)
}

# Generates random (stratified if needed) CV folds.
#
# Returns a list of length `nfold`. Without `group`, each element is an
# integer vector of test-row indices for one fold. With `group`
# (learning-to-rank), each element is a list with `fold` (row indices)
# and `group` (group indices), since sampling must keep query groups whole.
generate.cv.folds <- function(nfold, nrows, stratified, label, group, params) {

  if (is.null(group)) {

    # No query groups: shuffle the row indices
    shuffled_idx <- sample.int(nrows)

    # Stratified folds only make sense for classification objectives and
    # when a label of the right length is available
    if (isTRUE(stratified) && params$objective %in% c("binary", "multiclass") && length(label) == length(shuffled_idx)) {

      shuffled_labels <- factor(label[shuffled_idx])
      folds <- lgb.stratified.folds(shuffled_labels, nfold)

    } else {

      # Plain folds: peel roughly equal-sized chunks off the shuffled indices
      folds <- vector(mode = "list", length = nfold)
      for (fold_num in seq_len(nfold)) {
        chunk_size <- length(shuffled_idx) %/% (nfold - fold_num + 1L)
        taken <- seq_len(chunk_size)
        folds[[fold_num]] <- shuffled_idx[taken]
        shuffled_idx <- shuffled_idx[-taken]
      }

    }

  } else {

    # With query groups, sampling is by whole groups, so stratification is
    # not possible (only random selection of groups)
    if (nfold > length(group)) {
      stop("\n\tYou requested too many folds for the number of available groups.\n")
    }

    # Expand group sizes into one group id per row
    row_group_ids <- inverse.rle(list(lengths = group, values = seq_along(group)))

    # Can't stratify; shuffle the group indices instead
    shuffled_idx <- sample.int(length(group))

    # Peel roughly equal-sized chunks of groups off the shuffled indices
    folds <- vector(mode = "list", length = nfold)
    for (fold_num in seq_len(nfold)) {
      chunk_size <- length(shuffled_idx) %/% (nfold - fold_num + 1L)
      taken <- seq_len(chunk_size)
      folds[[fold_num]] <- list(
        fold = which(row_group_ids %in% shuffled_idx[taken])
        , group = shuffled_idx[taken]
      )
      shuffled_idx <- shuffled_idx[-taken]
    }

  }

  return(folds)
}

# Creates CV folds stratified by the values of y.
# It was borrowed from caret::lgb.stratified.folds and simplified
# by always returning an unnamed list of fold indices.
#' @importFrom stats quantile
lgb.stratified.folds <- function(y, k = 10L) {

  ## For numeric y, bin the values by magnitude (via quantiles) and
  ## stratify on the bins. The number of bins depends on the ratio of
  ## the sample size to the fold count: at least 2, at most 5. If the
  ## sample is too small we fall back to unstratified CV below.
  if (is.numeric(y)) {

    n_bins <- length(y) %/% k
    if (n_bins < 2L) {
      n_bins <- 2L
    }
    if (n_bins > 5L) {
      n_bins <- 5L
    }
    quantile_breaks <- unique(stats::quantile(y, probs = seq.int(0.0, 1.0, length.out = n_bins)))
    y <- cut(
      y
      , quantile_breaks
      , include.lowest = TRUE
    )

  }

  if (k < length(y)) {

    ## Drop unused factor levels so the observed classes and the
    ## factor levels agree
    y <- factor(as.character(y))
    class_counts <- table(y)
    fold_ids <- vector(mode = "integer", length(y))

    ## For each class, spread its members across the k folds as evenly
    ## as possible; the remainder goes to randomly chosen folds. The
    ## final within-class assignment is shuffled.
    for (class_idx in seq_along(class_counts)) {

      ## Repeat 1:k as many whole times as fits in this class's count.
      ## If the class has fewer than k members, this part is empty.
      fold_assignments <- rep(seq_len(k), class_counts[class_idx] %/% k)

      ## Top up with random fold numbers until
      ## length(fold_assignments) == class_counts[class_idx]
      if (class_counts[class_idx] %% k > 0L) {
        fold_assignments <- c(fold_assignments, sample.int(k, class_counts[class_idx] %% k))
      }

      ## Shuffle the fold numbers and assign them to this class's rows.
      ## NOTE(review): sample() on a length-1 numeric x permutes 1:x
      ## instead of returning x (inherited from caret) - confirm classes
      ## of size 1 with k > 1 behave as intended.
      fold_ids[y == names(class_counts)[class_idx]] <- sample(fold_assignments)

    }

  } else {

    ## Not enough observations to stratify: one index per "fold"
    fold_ids <- seq_along(y)

  }

  out <- split(seq_along(y), fold_ids)
  names(out) <- NULL
  out
}

568
# Merges the per-fold evaluation messages produced during one CV iteration.
#
# msg    - list with one element per fold; each element is a list of metric
#          records (each having at least `name`, `value`, `higher_better`)
# showsd - when TRUE, also compute the across-fold standard deviation
#          (population formula) of each metric
#
# Returns list(eval_list = <metric records with mean values>,
#              eval_err_list = <list of SDs, or NULL when showsd = FALSE>)
lgb.merge.cv.result <- function(msg, showsd = TRUE) {

  # Must have at least one fold's results
  if (length(msg) == 0L) {
    stop("lgb.cv: size of cv result error")
  }

  # Must have at least one metric per fold
  eval_len <- length(msg[[1L]])
  if (eval_len == 0L) {
    stop("lgb.cv: should provide at least one metric for CV")
  }

  # For each metric, gather its value from every fold into a numeric vector
  per_metric_values <- lapply(
    seq_len(eval_len)
    , function(metric_idx) {
      vapply(
        msg
        , function(fold_msg) as.numeric(fold_msg[[metric_idx]]$value)
        , numeric(1L)
      )
    }
  )

  # Use the first fold's records as the template (keeps name, higher_better,
  # data_name) and overwrite each value with the across-fold mean
  merged_evals <- msg[[1L]]
  for (metric_idx in seq_len(eval_len)) {
    merged_evals[[metric_idx]]$value <- mean(per_metric_values[[metric_idx]])
  }

  # Across-fold spread per metric: population SD = sqrt(E[x^2] - E[x]^2)
  merged_errs <- NULL
  if (showsd) {
    merged_errs <- as.list(
      vapply(
        per_metric_values
        , function(vals) sqrt(mean(vals ^ 2L) - mean(vals) ^ 2L)
        , numeric(1L)
      )
    )
  }

  list(
    eval_list = merged_evals
    , eval_err_list = merged_errs
  )
}