lgb.cv.R 19.3 KB
Newer Older
James Lamb's avatar
James Lamb committed
1
2
#' @importFrom R6 R6Class
# Container for the boosters trained on each CV fold, plus the merged
# evaluation history and best-iteration bookkeeping filled in by lgb.cv().
CVBooster <- R6::R6Class(
  classname = "lgb.CVBooster",
  cloneable = FALSE,
  public = list(
    # Best iteration found (set by the early-stopping / record logic in lgb.cv)
    best_iter = -1L,
    # Score at 'best_iter' (NA until computed)
    best_score = NA,
    # Merged evaluation results, filled by cb.record.evaluation()
    record_evals = list(),
    # One element per fold, each a list of the form list(booster = <Booster>)
    boosters = list(),
    initialize = function(x) {
      self$boosters <- x
    },
    # Push new parameters down to every fold's booster.
    # Fix: the previous code iterated over the unbound name 'boosters'
    # (fields are only reachable via 'self$' inside R6 methods) and called
    # 'reset_parameter' on the fold wrapper list instead of on the Booster
    # stored under its 'booster' key (see how bst_folds is built in lgb.cv).
    reset_parameter = function(new_params) {
      for (x in self$boosters) { x$booster$reset_parameter(new_params) }
      self
    }
  )
)

20
#' @name lgb.cv
James Lamb's avatar
James Lamb committed
21
#' @title Main CV logic for LightGBM
James Lamb's avatar
James Lamb committed
22
23
#' @description Cross validation logic used by LightGBM
#' @inheritParams lgb_shared_params
24
#' @param nfold the original dataset is randomly partitioned into \code{nfold} equal size subsamples.
25
#' @param label Vector of labels, used if \code{data} is not an \code{\link{lgb.Dataset}}
26
#' @param weight vector of observation weights. If not NULL, will set to dataset
27
#' @param record Boolean, TRUE will record iteration message to \code{booster$record_evals}
Guolin Ke's avatar
Guolin Ke committed
28
#' @param showsd \code{boolean}, whether to show standard deviation of cross validation
29
#' @param stratified a \code{boolean} indicating whether sampling of folds should be stratified
30
#'                   by the values of outcome labels.
Guolin Ke's avatar
Guolin Ke committed
31
#' @param folds \code{list} provides a possibility to use a list of pre-defined CV folds
32
33
#'              (each element must be a vector of test fold's indices). When folds are supplied,
#'              the \code{nfold} and \code{stratified} parameters are ignored.
Guolin Ke's avatar
Guolin Ke committed
34
#' @param colnames feature names, if not null, will use this to overwrite the names in dataset
35
36
37
#' @param categorical_feature categorical features. This can either be a character vector of feature
#'                            names or an integer vector with the indices of the features (e.g.
#'                            \code{c(1L, 10L)} to say "the first and tenth columns").
38
39
40
#' @param callbacks List of callback functions that are applied at each iteration.
#' @param reset_data Boolean, setting it to TRUE (not the default value) will transform the booster model
#'                   into a predictor model which frees up memory and the original datasets
James Lamb's avatar
James Lamb committed
41
42
#' @param ... other parameters, see Parameters.rst for more information. A few key parameters:
#'            \itemize{
43
44
45
#'                \item{\code{boosting}: Boosting type. \code{"gbdt"}, \code{"rf"}, \code{"dart"} or \code{"goss"}.}
#'                \item{\code{num_leaves}: Maximum number of leaves in one tree.}
#'                \item{\code{max_depth}: Limit the max depth for tree model. This is used to deal with
James Lamb's avatar
James Lamb committed
46
#'                                 overfit when #data is small. Tree still grow by leaf-wise.}
47
#'                \item{\code{num_threads}: Number of threads for LightGBM. For the best speed, set this to
48
#'                                   the number of real CPU cores, not the number of threads (most
James Lamb's avatar
James Lamb committed
49
50
#'                                   CPU using hyper-threading to generate 2 threads per CPU core).}
#'            }
51
#' @inheritSection lgb_shared_params Early Stopping
52
#' @return a trained model \code{lgb.CVBooster}.
53
#'
Guolin Ke's avatar
Guolin Ke committed
54
#' @examples
55
#' \dontrun{
56
57
58
59
#' data(agaricus.train, package = "lightgbm")
#' train <- agaricus.train
#' dtrain <- lgb.Dataset(train$data, label = train$label)
#' params <- list(objective = "regression", metric = "l2")
60
61
62
#' model <- lgb.cv(
#'   params = params
#'   , data = dtrain
63
#'   , nrounds = 5L
64
65
66
#'   , nfold = 3L
#'   , min_data = 1L
#'   , learning_rate = 1.0
67
#' )
68
#' }
69
#' @importFrom data.table data.table setorderv
Guolin Ke's avatar
Guolin Ke committed
70
#' @export
71
72
73
74
75
76
77
78
79
80
81
lgb.cv <- function(params = list()
                   , data
                   , nrounds = 10L
                   , nfold = 3L
                   , label = NULL
                   , weight = NULL
                   , obj = NULL
                   , eval = NULL
                   , verbose = 1L
                   , record = TRUE
                   , eval_freq = 1L
                   , showsd = TRUE
                   , stratified = TRUE
                   , folds = NULL
                   , init_model = NULL
                   , colnames = NULL
                   , categorical_feature = NULL
                   , early_stopping_rounds = NULL
                   , callbacks = list()
                   , reset_data = FALSE
                   , ...
                   ) {

  # validate parameters
  if (nrounds <= 0L) {
    stop("nrounds should be greater than zero")
  }

  # If 'data' is not an lgb.Dataset, try to construct one using 'label'
  if (!lgb.is.Dataset(data)) {
    if (is.null(label)) {
      stop("'label' must be provided for lgb.cv if 'data' is not an 'lgb.Dataset'")
    }
    data <- lgb.Dataset(data, label = label)
  }

  # Setup temporary variables. Keyword arguments in '...' are folded into
  # 'params'; the 'verbose' keyword argument always overwrites any value
  # already present in 'params'.
  params <- append(params, list(...))
  params$verbose <- verbose
  params <- lgb.check.obj(params, obj)
  params <- lgb.check.eval(params, eval)
  fobj <- NULL
  eval_functions <- list(NULL)

  # Check for objective (function or not). A function objective is handed to
  # the Booster as 'fobj' and the built-in objective is disabled with "NONE".
  if (is.function(params$objective)) {
    fobj <- params$objective
    params$objective <- "NONE"
  }

  # If loss is a single function, store it as a 1-element list
  # (for backwards compatibility). If it is a list of functions, store
  # all of them
  if (is.function(eval)) {
    eval_functions <- list(eval)
  }
  if (methods::is(eval, "list")) {
    eval_functions <- Filter(
      f = is.function
      , x = eval
    )
  }

  # Init predictor to empty
  predictor <- NULL

  # Check for boosting from a trained model
  if (is.character(init_model)) {
    predictor <- Predictor$new(init_model)
  } else if (lgb.is.Booster(init_model)) {
    predictor <- init_model$to_predictor()
  }

  # Set the iteration to start from / end to (and check for boosting from a trained model, again)
  begin_iteration <- 1L
  if (!is.null(predictor)) {
    begin_iteration <- predictor$current_iter() + 1L
  }
  # Check for number of rounds passed as parameter - in case there are multiple ones, take only the first one
  n_trees <- .PARAMETER_ALIASES()[["num_iterations"]]
  if (any(names(params) %in% n_trees)) {
    end_iteration <- begin_iteration + params[[which(names(params) %in% n_trees)[1L]]] - 1L
  } else {
    end_iteration <- begin_iteration + nrounds - 1L
  }

  # Check interaction constraints. Feature names come from the 'colnames'
  # argument when given, otherwise from the Dataset itself.
  cnames <- NULL
  if (!is.null(colnames)) {
    cnames <- colnames
  } else if (!is.null(data$get_colnames())) {
    cnames <- data$get_colnames()
  }
  params[["interaction_constraints"]] <- lgb.check_interaction_constraints(params, cnames)

  # Check for weights
  if (!is.null(weight)) {
    data$setinfo("weight", weight)
  }

  # Update parameters with parsed parameters
  data$update_params(params)

  # Create the predictor set (reaches into the Dataset's private environment
  # because there is no public setter for the predictor)
  data$.__enclos_env__$private$set_predictor(predictor)

  # Write column names
  if (!is.null(colnames)) {
    data$set_colnames(colnames)
  }

  # Write categorical features
  if (!is.null(categorical_feature)) {
    data$set_categorical_feature(categorical_feature)
  }

  # Construct datasets, if needed
  data$construct()

  # Check for folds
  if (!is.null(folds)) {

    # Check for list of folds or for single value
    if (!identical(class(folds), "list") || length(folds) < 2L) {
      stop(sQuote("folds"), " must be a list with 2 or more elements that are vectors of indices for each CV-fold")
    }

    # Set number of folds
    nfold <- length(folds)

  } else {

    # Check fold value
    if (nfold <= 1L) {
      stop(sQuote("nfold"), " must be > 1")
    }

    # Create folds
    folds <- generate.cv.folds(
      nfold = nfold
      , nrows = nrow(data)
      , stratified = stratified
      , label = getinfo(data, "label")
      , group = getinfo(data, "group")
      , params = params
    )

  }

  # Add printing log callback
  if (verbose > 0L && eval_freq > 0L) {
    callbacks <- add.cb(callbacks, cb.print.evaluation(eval_freq))
  }

  # Add evaluation log callback
  if (record) {
    callbacks <- add.cb(callbacks, cb.record.evaluation())
  }

  # If early stopping was passed as a parameter in params(), prefer that to keyword argument
  # early_stopping_rounds by overwriting the value in 'early_stopping_rounds'
  early_stop <- .PARAMETER_ALIASES()[["early_stopping_round"]]
  early_stop_param_indx <- names(params) %in% early_stop
  if (any(early_stop_param_indx)) {
    first_early_stop_param <- which(early_stop_param_indx)[[1L]]
    first_early_stop_param_name <- names(params)[[first_early_stop_param]]
    early_stopping_rounds <- params[[first_early_stop_param_name]]
  }

  # Did user pass parameters that indicate they want to use early stopping?
  using_early_stopping_via_args <- !is.null(early_stopping_rounds) && early_stopping_rounds > 0L

  boosting_param_names <- .PARAMETER_ALIASES()[["boosting"]]
  using_dart <- any(
    sapply(
      X = boosting_param_names
      , FUN = function(param) {
        identical(params[[param]], "dart")
      }
    )
  )

  # Cannot use early stopping with 'dart' boosting
  if (using_dart) {
    warning("Early stopping is not available in 'dart' mode.")
    using_early_stopping_via_args <- FALSE

    # Remove the cb.early.stop() function if it was passed in to callbacks
    callbacks <- Filter(
      f = function(cb_func) {
        !identical(attr(cb_func, "name"), "cb.early.stop")
      }
      , x = callbacks
    )
  }

  # If user supplied early_stopping_rounds, add the early stopping callback
  if (using_early_stopping_via_args) {
    callbacks <- add.cb(
      callbacks
      , cb.early.stop(
        stopping_rounds = early_stopping_rounds
        , first_metric_only = isTRUE(params[["first_metric_only"]])
        , verbose = verbose
      )
    )
  }

  # Categorize callbacks
  cb <- categorize.callbacks(callbacks)

  # Construct booster for each fold. The data.table() code below is used to
  # guarantee that indices are sorted while keeping init_score and weight together
  # with the correct indices. Note that it takes advantage of the fact that
  # someDT$some_column returns NULL if 'some_column' does not exist in the data.table
  bst_folds <- lapply(
    X = seq_along(folds)
    , FUN = function(k) {

      # For learning-to-rank, each fold is a named list with two elements:
      #   * `fold` = an integer vector of row indices
      #   * `group` = an integer vector describing which groups are in the fold
      # For classification or regression tasks, it will just be an integer
      # vector of row indices
      folds_have_group <- "group" %in% names(folds[[k]])
      if (folds_have_group) {
        test_indices <- folds[[k]]$fold
        test_group_indices <- folds[[k]]$group
        test_groups <- getinfo(data, "group")[test_group_indices]
        train_groups <- getinfo(data, "group")[-test_group_indices]
      } else {
        test_indices <- folds[[k]]
      }
      train_indices <- seq_len(nrow(data))[-test_indices]

      # set up test set
      indexDT <- data.table::data.table(
        indices = test_indices
        , weight = getinfo(data, "weight")[test_indices]
        , init_score = getinfo(data, "init_score")[test_indices]
      )
      data.table::setorderv(indexDT, "indices", order = 1L)
      dtest <- slice(data, indexDT$indices)
      setinfo(dtest, "weight", indexDT$weight)
      setinfo(dtest, "init_score", indexDT$init_score)

      # set up training set
      indexDT <- data.table::data.table(
        indices = train_indices
        , weight = getinfo(data, "weight")[train_indices]
        , init_score = getinfo(data, "init_score")[train_indices]
      )
      data.table::setorderv(indexDT, "indices", order = 1L)
      dtrain <- slice(data, indexDT$indices)
      setinfo(dtrain, "weight", indexDT$weight)
      setinfo(dtrain, "init_score", indexDT$init_score)

      if (folds_have_group) {
        setinfo(dtest, "group", test_groups)
        setinfo(dtrain, "group", train_groups)
      }

      booster <- Booster$new(params, dtrain)
      booster$add_valid(dtest, "valid")
      return(
        list(booster = booster)
      )
    }
  )

  # Create new booster
  cv_booster <- CVBooster$new(bst_folds)

  # Callback env
  env <- CB_ENV$new()
  env$model <- cv_booster
  env$begin_iteration <- begin_iteration
  env$end_iteration <- end_iteration

  # Start training model using number of iterations to start and end with
  for (i in seq.int(from = begin_iteration, to = end_iteration)) {

    # Overwrite iteration in environment
    env$iteration <- i
    env$eval_list <- list()

    # Loop through "pre_iter" element
    for (f in cb$pre_iter) {
      f(env)
    }

    # Update one boosting iteration on every fold, then collect that fold's
    # validation results (one entry per eval function)
    msg <- lapply(cv_booster$boosters, function(fd) {
      fd$booster$update(fobj = fobj)
      out <- list()
      for (eval_function in eval_functions) {
        out <- append(out, fd$booster$eval_valid(feval = eval_function))
      }
      return(out)
    })

    # Prepare collection of evaluation results
    merged_msg <- lgb.merge.cv.result(msg)

    # Write evaluation result in environment
    env$eval_list <- merged_msg$eval_list

    # Check for standard deviation requirement
    if (showsd) {
      env$eval_err_list <- merged_msg$eval_err_list
    }

    # Loop through env
    for (f in cb$post_iter) {
      f(env)
    }

    # Check for early stopping and break if needed
    if (env$met_early_stop) break

  }

  # When early stopping is not activated, we compute the best iteration / score ourselves
  # based on the first first metric
  if (record && is.na(env$best_score)) {

    # when using a custom eval function, the metric name is returned from the
    # function, so figure it out from record_evals
    # NOTE(review): eval_functions[1L] is a length-1 list and is never NULL,
    # so this branch is always taken; '[[' may have been intended — confirm
    if (!is.null(eval_functions[1L])) {
      first_metric <- names(cv_booster$record_evals[["valid"]])[1L]
    } else {
      first_metric <- cv_booster$.__enclos_env__$private$eval_names[1L]
    }

    # Pick the direction to optimize from the metric's higher_better flag
    .find_best <- which.min
    if (isTRUE(env$eval_list[[1L]]$higher_better[1L])) {
      .find_best <- which.max
    }
    cv_booster$best_iter <- unname(
      .find_best(
        unlist(
          cv_booster$record_evals[["valid"]][[first_metric]][[.EVAL_KEY()]]
        )
      )
    )
    cv_booster$best_score <- cv_booster$record_evals[["valid"]][[first_metric]][[.EVAL_KEY()]][[cv_booster$best_iter]]
  }

  # Optionally free the training data by round-tripping each fold's booster
  # through a model string (keeping its bookkeeping fields)
  if (reset_data) {
    lapply(cv_booster$boosters, function(fd) {
      # Store temporarily model data elsewhere
      booster_old <- list(
        best_iter = fd$booster$best_iter
        , best_score = fd$booster$best_score
        , record_evals = fd$booster$record_evals
      )
      # Reload model
      fd$booster <- lgb.load(model_str = fd$booster$save_model_to_string())
      fd$booster$best_iter <- booster_old$best_iter
      fd$booster$best_score <- booster_old$best_score
      fd$booster$record_evals <- booster_old$record_evals
    })
  }

  # Return booster
  return(cv_booster)
}

# Generates random (stratified if requested) CV folds.
# Returns a list of integer row-index vectors when there are no query groups;
# when `group` is provided, each element is list(fold = <row indices>,
# group = <group ids>) instead.
generate.cv.folds <- function(nfold, nrows, stratified, label, group, params) {

  if (is.null(group)) {

    # Shuffle the row indices before carving them into folds
    shuffled <- sample.int(nrows)

    if (isTRUE(stratified) && params$objective %in% c("binary", "multiclass") && length(label) == length(shuffled)) {

      # Classification objectives: stratify on the (shuffled) labels
      folds <- lgb.stratified.folds(factor(label[shuffled]), nfold)

    } else {

      # Plain folds: repeatedly peel the next chunk off the shuffled indices,
      # dividing what remains evenly among the folds still to be filled
      folds <- list()
      remaining <- shuffled
      for (fold_idx in seq_len(nfold)) {
        chunk_size <- length(remaining) %/% (nfold - fold_idx + 1L)
        folds[[fold_idx]] <- remaining[seq_len(chunk_size)]
        remaining <- remaining[-seq_len(chunk_size)]
      }

    }

  } else {

    # With query groups, sampling happens at the group level (stratification
    # is not possible), so there must be at least one group per fold
    if (nfold > length(group)) {
      stop("\n\tYou requested too many folds for the number of available groups.\n")
    }

    # Expand the per-group sizes into one group id per row
    row_group_ids <- inverse.rle(list(lengths = group, values = seq_along(group)))

    # Shuffle the group ids, then deal them out chunk by chunk
    remaining <- sample.int(length(group))
    folds <- list()
    for (fold_idx in seq_len(nfold)) {
      chunk_size <- length(remaining) %/% (nfold - fold_idx + 1L)
      fold_groups <- remaining[seq_len(chunk_size)]
      folds[[fold_idx]] <- list(
        fold = which(row_group_ids %in% fold_groups)
        , group = fold_groups
      )
      remaining <- remaining[-seq_len(chunk_size)]
    }

  }

  return(folds)
}

# Creates CV folds stratified by the values of y.
# It was borrowed from caret::lgb.stratified.folds and simplified
# by always returning an unnamed list of fold indices.
#' @importFrom stats quantile
lgb.stratified.folds <- function(y, k = 10L) {

  ## Numeric outcomes are binned by magnitude so sampling can stratify on
  ## the bins. The number of bins depends on the ratio of folds to sample
  ## size: at most 5 quantile-based bins, at least 2. If the sample is
  ## smaller than the fold count, the code below degenerates to plain
  ## unstratified assignment.
  if (is.numeric(y)) {
    n_bins <- max(2L, min(length(y) %/% k, 5L))
    bin_breaks <- unique(stats::quantile(y, probs = seq.int(0.0, 1.0, length.out = n_bins)))
    y <- cut(y, bin_breaks, include.lowest = TRUE)
  }

  if (k < length(y)) {

    ## Drop unused levels so the observed classes and the factor levels agree
    y <- factor(as.character(y))
    class_counts <- table(y)
    fold_ids <- integer(length(y))

    ## For each class: spread fold labels 1..k as evenly as possible, give
    ## leftover rows randomly chosen folds, then shuffle the labels before
    ## writing them back onto that class's rows
    for (class_idx in seq_along(class_counts)) {
      class_size <- class_counts[class_idx]
      labels_for_class <- rep(seq_len(k), class_size %/% k)
      if (class_size %% k > 0L) {
        labels_for_class <- c(labels_for_class, sample.int(k, class_size %% k))
      }
      fold_ids[y == names(class_counts)[class_idx]] <- sample(labels_for_class)
    }

  } else {

    ## Fewer observations than folds: every row becomes its own fold
    fold_ids <- seq_along(y)

  }

  ## Group row positions by assigned fold and strip the fold-id names
  unname(split(seq_along(y), fold_ids))
}

573
# Merges per-fold evaluation messages into cross-fold means and (optionally)
# population standard deviations. `msg` is a list with one element per fold;
# each element is a list of metric records carrying at least `$value`.
lgb.merge.cv.result <- function(msg, showsd = TRUE) {

  # A result with zero folds is not a valid CV result
  if (length(msg) == 0L) {
    stop("lgb.cv: size of cv result error")
  }

  # Every fold reports the same metrics; count them from the first fold
  eval_len <- length(msg[[1L]])
  if (eval_len == 0L) {
    stop("lgb.cv: should provide at least one metric for CV")
  }

  # For each metric, gather the per-fold values into one numeric vector
  eval_result <- lapply(
    seq_len(eval_len)
    , function(metric_idx) {
      vapply(
        msg
        , function(fold_msg) {
          as.numeric(fold_msg[[metric_idx]]$value)
        }
        , numeric(1L)
      )
    }
  )

  # Reuse the first fold's records for their structure (name, higher_better,
  # data_name) and overwrite each value with the cross-fold mean
  ret_eval <- msg[[1L]]
  for (metric_idx in seq_len(eval_len)) {
    ret_eval[[metric_idx]]$value <- mean(eval_result[[metric_idx]])
  }

  # Optionally compute the population standard deviation per metric
  # (sqrt(E[x^2] - E[x]^2)); stays NULL when showsd is FALSE
  ret_eval_err <- NULL
  if (showsd) {
    ret_eval_err <- lapply(
      eval_result
      , function(vals) {
        sqrt(mean(vals ^ 2L) - mean(vals) ^ 2L)
      }
    )
  }

  return(list(
    eval_list = ret_eval
    , eval_err_list = ret_eval_err
  ))
}