lgb.cv.R 18.9 KB
Newer Older
James Lamb's avatar
James Lamb committed
1
2
#' @importFrom R6 R6Class
CVBooster <- R6::R6Class(
3
  classname = "lgb.CVBooster",
4
  cloneable = FALSE,
Guolin Ke's avatar
Guolin Ke committed
5
  public = list(
6
    best_iter = -1L,
7
    best_score = NA,
Guolin Ke's avatar
Guolin Ke committed
8
    record_evals = list(),
9
10
    boosters = list(),
    initialize = function(x) {
Guolin Ke's avatar
Guolin Ke committed
11
      self$boosters <- x
12
    },
13
14
15
    reset_parameter = function(new_params) {
      for (x in boosters) { x$reset_parameter(new_params) }
      self
Guolin Ke's avatar
Guolin Ke committed
16
17
18
19
    }
  )
)

20
#' @name lgb.cv
James Lamb's avatar
James Lamb committed
21
#' @title Main CV logic for LightGBM
James Lamb's avatar
James Lamb committed
22
23
#' @description Cross validation logic used by LightGBM
#' @inheritParams lgb_shared_params
24
#' @param nfold the original dataset is randomly partitioned into \code{nfold} equal size subsamples.
25
#' @param label Vector of labels, used if \code{data} is not an \code{\link{lgb.Dataset}}
26
#' @param weight vector of response values. If not NULL, will set to dataset
27
#' @param record Boolean, TRUE will record iteration message to \code{booster$record_evals}
Guolin Ke's avatar
Guolin Ke committed
28
#' @param showsd \code{boolean}, whether to show standard deviation of cross validation
29
#' @param stratified a \code{boolean} indicating whether sampling of folds should be stratified
30
#'                   by the values of outcome labels.
Guolin Ke's avatar
Guolin Ke committed
31
#' @param folds \code{list} provides a possibility to use a list of pre-defined CV folds
32
33
#'              (each element must be a vector of test fold's indices). When folds are supplied,
#'              the \code{nfold} and \code{stratified} parameters are ignored.
Guolin Ke's avatar
Guolin Ke committed
34
#' @param colnames feature names, if not null, will use this to overwrite the names in dataset
35
36
37
#' @param categorical_feature categorical features. This can either be a character vector of feature
#'                            names or an integer vector with the indices of the features (e.g.
#'                            \code{c(1L, 10L)} to say "the first and tenth columns").
38
39
40
#' @param callbacks List of callback functions that are applied at each iteration.
#' @param reset_data Boolean, setting it to TRUE (not the default value) will transform the booster model
#'                   into a predictor model which frees up memory and the original datasets
James Lamb's avatar
James Lamb committed
41
42
#' @param ... other parameters, see Parameters.rst for more information. A few key parameters:
#'            \itemize{
43
44
45
#'                \item{\code{boosting}: Boosting type. \code{"gbdt"}, \code{"rf"}, \code{"dart"} or \code{"goss"}.}
#'                \item{\code{num_leaves}: Maximum number of leaves in one tree.}
#'                \item{\code{max_depth}: Limit the max depth for tree model. This is used to deal with
James Lamb's avatar
James Lamb committed
46
#'                                 overfit when #data is small. Tree still grow by leaf-wise.}
47
#'                \item{\code{num_threads}: Number of threads for LightGBM. For the best speed, set this to
48
#'                                   the number of real CPU cores, not the number of threads (most
James Lamb's avatar
James Lamb committed
49
50
#'                                   CPU using hyper-threading to generate 2 threads per CPU core).}
#'            }
51
#' @inheritSection lgb_shared_params Early Stopping
52
#' @return a trained model \code{lgb.CVBooster}.
53
#'
Guolin Ke's avatar
Guolin Ke committed
54
#' @examples
55
#' \donttest{
56
57
58
59
#' data(agaricus.train, package = "lightgbm")
#' train <- agaricus.train
#' dtrain <- lgb.Dataset(train$data, label = train$label)
#' params <- list(objective = "regression", metric = "l2")
60
61
62
#' model <- lgb.cv(
#'   params = params
#'   , data = dtrain
63
#'   , nrounds = 5L
64
65
66
#'   , nfold = 3L
#'   , min_data = 1L
#'   , learning_rate = 1.0
67
#' )
68
#' }
69
#' @importFrom data.table data.table setorderv
Guolin Ke's avatar
Guolin Ke committed
70
#' @export
71
72
73
74
75
76
77
78
79
80
81
lgb.cv <- function(params = list()
                   , data
                   , nrounds = 10L
                   , nfold = 3L
                   , label = NULL
                   , weight = NULL
                   , obj = NULL
                   , eval = NULL
                   , verbose = 1L
                   , record = TRUE
                   , eval_freq = 1L
82
                   , showsd = TRUE
83
84
85
86
87
88
89
90
91
92
                   , stratified = TRUE
                   , folds = NULL
                   , init_model = NULL
                   , colnames = NULL
                   , categorical_feature = NULL
                   , early_stopping_rounds = NULL
                   , callbacks = list()
                   , reset_data = FALSE
                   , ...
                   ) {
93

94
95
96
97
98
99
100
101
102
103
104
105
  if (nrounds <= 0L) {
    stop("nrounds should be greater than zero")
  }

  # If 'data' is not an lgb.Dataset, try to construct one using 'label'
  if (!lgb.is.Dataset(data)) {
    if (is.null(label)) {
      stop("'label' must be provided for lgb.cv if 'data' is not an 'lgb.Dataset'")
    }
    data <- lgb.Dataset(data, label = label)
  }

106
  # Setup temporary variables
107
  params <- append(params, list(...))
108
109
110
111
  params$verbose <- verbose
  params <- lgb.check.obj(params, obj)
  params <- lgb.check.eval(params, eval)
  fobj <- NULL
112
  eval_functions <- list(NULL)
113

114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
  # set some parameters, resolving the way they were passed in with other parameters
  # in `params`.
  # this ensures that the model stored with Booster$save() correctly represents
  # what was passed in
  params <- lgb.check.wrapper_param(
    main_param_name = "num_iterations"
    , params = params
    , alternative_kwarg_value = nrounds
  )
  params <- lgb.check.wrapper_param(
    main_param_name = "early_stopping_round"
    , params = params
    , alternative_kwarg_value = early_stopping_rounds
  )
  early_stopping_rounds <- params[["early_stopping_round"]]

130
  # Check for objective (function or not)
131
  if (is.function(params$objective)) {
Guolin Ke's avatar
Guolin Ke committed
132
133
134
    fobj <- params$objective
    params$objective <- "NONE"
  }
135

136
  # If eval is a single function, store it as a 1-element list
137
  # (for backwards compatibility). If it is a list of functions, store
138
139
  # all of them. This makes it possible to pass any mix of strings like "auc"
  # and custom functions to eval
140
  if (is.function(eval)) {
141
142
143
144
145
146
147
    eval_functions <- list(eval)
  }
  if (methods::is(eval, "list")) {
    eval_functions <- Filter(
      f = is.function
      , x = eval
    )
148
  }
149

150
  # Init predictor to empty
Guolin Ke's avatar
Guolin Ke committed
151
  predictor <- NULL
152

153
  # Check for boosting from a trained model
154
  if (is.character(init_model)) {
Guolin Ke's avatar
Guolin Ke committed
155
    predictor <- Predictor$new(init_model)
156
  } else if (lgb.is.Booster(init_model)) {
Guolin Ke's avatar
Guolin Ke committed
157
158
    predictor <- init_model$to_predictor()
  }
159

160
  # Set the iteration to start from / end to (and check for boosting from a trained model, again)
161
  begin_iteration <- 1L
162
  if (!is.null(predictor)) {
163
    begin_iteration <- predictor$current_iter() + 1L
Guolin Ke's avatar
Guolin Ke committed
164
  }
165
  end_iteration <- begin_iteration + params[["num_iterations"]] - 1L
166

167
168
169
170
  # Construct datasets, if needed
  data$update_params(params = params)
  data$construct()

171
172
173
174
175
176
177
178
179
  # Check interaction constraints
  cnames <- NULL
  if (!is.null(colnames)) {
    cnames <- colnames
  } else if (!is.null(data$get_colnames())) {
    cnames <- data$get_colnames()
  }
  params[["interaction_constraints"]] <- lgb.check_interaction_constraints(params, cnames)

180
181
  # Check for weights
  if (!is.null(weight)) {
182
    data$setinfo("weight", weight)
183
  }
184

185
  # Update parameters with parsed parameters
Guolin Ke's avatar
Guolin Ke committed
186
  data$update_params(params)
187

188
  # Create the predictor set
Guolin Ke's avatar
Guolin Ke committed
189
  data$.__enclos_env__$private$set_predictor(predictor)
190

191
192
193
194
  # Write column names
  if (!is.null(colnames)) {
    data$set_colnames(colnames)
  }
195

196
197
198
199
  # Write categorical features
  if (!is.null(categorical_feature)) {
    data$set_categorical_feature(categorical_feature)
  }
200

201
  # Check for folds
202
  if (!is.null(folds)) {
203

204
    # Check for list of folds or for single value
205
    if (!identical(class(folds), "list") || length(folds) < 2L) {
206
      stop(sQuote("folds"), " must be a list with 2 or more elements that are vectors of indices for each CV-fold")
207
    }
208

209
    # Set number of folds
Guolin Ke's avatar
Guolin Ke committed
210
    nfold <- length(folds)
211

Guolin Ke's avatar
Guolin Ke committed
212
  } else {
213

214
    # Check fold value
215
    if (nfold <= 1L) {
216
217
      stop(sQuote("nfold"), " must be > 1")
    }
218

219
    # Create folds
220
    folds <- generate.cv.folds(
221
222
223
224
225
226
      nfold = nfold
      , nrows = nrow(data)
      , stratified = stratified
      , label = getinfo(data, "label")
      , group = getinfo(data, "group")
      , params = params
227
    )
228

Guolin Ke's avatar
Guolin Ke committed
229
  }
230

231
  # Add printing log callback
232
  if (verbose > 0L && eval_freq > 0L) {
Guolin Ke's avatar
Guolin Ke committed
233
234
    callbacks <- add.cb(callbacks, cb.print.evaluation(eval_freq))
  }
235

236
237
238
239
  # Add evaluation log callback
  if (record) {
    callbacks <- add.cb(callbacks, cb.record.evaluation())
  }
240

241
  # Did user pass parameters that indicate they want to use early stopping?
242
  using_early_stopping <- !is.null(early_stopping_rounds) && early_stopping_rounds > 0L
243
244
245
246
247

  boosting_param_names <- .PARAMETER_ALIASES()[["boosting"]]
  using_dart <- any(
    sapply(
      X = boosting_param_names
248
249
      , FUN = function(param) {
        identical(params[[param]], "dart")
250
      }
251
252
253
254
    )
  )

  # Cannot use early stopping with 'dart' boosting
255
  if (using_dart) {
256
    warning("Early stopping is not available in 'dart' mode.")
257
    using_early_stopping <- FALSE
258
259
260

    # Remove the cb.early.stop() function if it was passed in to callbacks
    callbacks <- Filter(
261
      f = function(cb_func) {
262
263
264
265
266
267
268
        !identical(attr(cb_func, "name"), "cb.early.stop")
      }
      , x = callbacks
    )
  }

  # If user supplied early_stopping_rounds, add the early stopping callback
269
  if (using_early_stopping) {
270
271
272
273
    callbacks <- add.cb(
      callbacks
      , cb.early.stop(
        stopping_rounds = early_stopping_rounds
274
        , first_metric_only = isTRUE(params[["first_metric_only"]])
275
276
277
        , verbose = verbose
      )
    )
Guolin Ke's avatar
Guolin Ke committed
278
  }
279

Guolin Ke's avatar
Guolin Ke committed
280
  cb <- categorize.callbacks(callbacks)
281

282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
  # Construct booster for each fold. The data.table() code below is used to
  # guarantee that indices are sorted while keeping init_score and weight together
  # with the correct indices. Note that it takes advantage of the fact that
  # someDT$some_column returns NULL is 'some_column' does not exist in the data.table
  bst_folds <- lapply(
    X = seq_along(folds)
    , FUN = function(k) {

      # For learning-to-rank, each fold is a named list with two elements:
      #   * `fold` = an integer vector of row indices
      #   * `group` = an integer vector describing which groups are in the fold
      # For classification or regression tasks, it will just be an integer
      # vector of row indices
      folds_have_group <- "group" %in% names(folds[[k]])
      if (folds_have_group) {
        test_indices <- folds[[k]]$fold
        test_group_indices <- folds[[k]]$group
        test_groups <- getinfo(data, "group")[test_group_indices]
        train_groups <- getinfo(data, "group")[-test_group_indices]
      } else {
        test_indices <- folds[[k]]
      }
      train_indices <- seq_len(nrow(data))[-test_indices]

      # set up test set
      indexDT <- data.table::data.table(
        indices = test_indices
        , weight = getinfo(data, "weight")[test_indices]
        , init_score = getinfo(data, "init_score")[test_indices]
      )
      data.table::setorderv(indexDT, "indices", order = 1L)
      dtest <- slice(data, indexDT$indices)
      setinfo(dtest, "weight", indexDT$weight)
      setinfo(dtest, "init_score", indexDT$init_score)

      # set up training set
      indexDT <- data.table::data.table(
        indices = train_indices
        , weight = getinfo(data, "weight")[train_indices]
        , init_score = getinfo(data, "init_score")[train_indices]
      )
      data.table::setorderv(indexDT, "indices", order = 1L)
      dtrain <- slice(data, indexDT$indices)
      setinfo(dtrain, "weight", indexDT$weight)
      setinfo(dtrain, "init_score", indexDT$init_score)

      if (folds_have_group) {
        setinfo(dtest, "group", test_groups)
        setinfo(dtrain, "group", train_groups)
      }

333
334
      booster <- Booster$new(params, dtrain)
      booster$add_valid(dtest, "valid")
335
336
337
338
339
      return(
        list(booster = booster)
      )
    }
  )
340

341
  # Create new booster
Guolin Ke's avatar
Guolin Ke committed
342
  cv_booster <- CVBooster$new(bst_folds)
343

344
345
346
  # Callback env
  env <- CB_ENV$new()
  env$model <- cv_booster
Guolin Ke's avatar
Guolin Ke committed
347
  env$begin_iteration <- begin_iteration
348
  env$end_iteration <- end_iteration
349

350
  # Start training model using number of iterations to start and end with
351
  for (i in seq.int(from = begin_iteration, to = end_iteration)) {
352

353
    # Overwrite iteration in environment
Guolin Ke's avatar
Guolin Ke committed
354
355
    env$iteration <- i
    env$eval_list <- list()
356

357
358
359
    for (f in cb$pre_iter) {
      f(env)
    }
360

361
    # Update one boosting iteration
Guolin Ke's avatar
Guolin Ke committed
362
    msg <- lapply(cv_booster$boosters, function(fd) {
363
      fd$booster$update(fobj = fobj)
364
365
366
367
368
      out <- list()
      for (eval_function in eval_functions) {
        out <- append(out, fd$booster$eval_valid(feval = eval_function))
      }
      return(out)
Guolin Ke's avatar
Guolin Ke committed
369
    })
370

371
    # Prepare collection of evaluation results
Guolin Ke's avatar
Guolin Ke committed
372
    merged_msg <- lgb.merge.cv.result(msg)
373

374
    # Write evaluation result in environment
Guolin Ke's avatar
Guolin Ke committed
375
    env$eval_list <- merged_msg$eval_list
376

377
    # Check for standard deviation requirement
378
    if (showsd) {
379
380
      env$eval_err_list <- merged_msg$eval_err_list
    }
381

382
383
384
385
    # Loop through env
    for (f in cb$post_iter) {
      f(env)
    }
386

387
    # Check for early stopping and break if needed
388
    if (env$met_early_stop) break
389

Guolin Ke's avatar
Guolin Ke committed
390
  }
391

392
393
  # When early stopping is not activated, we compute the best iteration / score ourselves
  # based on the first first metric
394
  if (record && is.na(env$best_score)) {
395
396
397
398
399
400
401
    # when using a custom eval function, the metric name is returned from the
    # function, so figure it out from record_evals
    if (!is.null(eval_functions[1L])) {
      first_metric <- names(cv_booster$record_evals[["valid"]])[1L]
    } else {
      first_metric <- cv_booster$.__enclos_env__$private$eval_names[1L]
    }
402
403
404
    .find_best <- which.min
    if (isTRUE(env$eval_list[[1L]]$higher_better[1L])) {
      .find_best <- which.max
405
    }
406
407
408
409
410
411
412
413
    cv_booster$best_iter <- unname(
      .find_best(
        unlist(
          cv_booster$record_evals[["valid"]][[first_metric]][[.EVAL_KEY()]]
        )
      )
    )
    cv_booster$best_score <- cv_booster$record_evals[["valid"]][[first_metric]][[.EVAL_KEY()]][[cv_booster$best_iter]]
414
  }
415

416
417
418
  if (reset_data) {
    lapply(cv_booster$boosters, function(fd) {
      # Store temporarily model data elsewhere
419
420
      booster_old <- list(
        best_iter = fd$booster$best_iter
421
        , best_score = fd$booster$best_score
422
423
        , record_evals = fd$booster$record_evals
      )
424
425
426
427
428
429
430
      # Reload model
      fd$booster <- lgb.load(model_str = fd$booster$save_model_to_string())
      fd$booster$best_iter <- booster_old$best_iter
      fd$booster$best_score <- booster_old$best_score
      fd$booster$record_evals <- booster_old$record_evals
    })
  }
431

432
  return(cv_booster)
433

Guolin Ke's avatar
Guolin Ke committed
434
435
436
}

# Generates random (stratified if needed) CV folds
437
generate.cv.folds <- function(nfold, nrows, stratified, label, group, params) {
438

439
440
  # Check for group existence
  if (is.null(group)) {
441

442
    # Shuffle
443
    rnd_idx <- sample.int(nrows)
444

445
    # Request stratified folds
446
    if (isTRUE(stratified) && params$objective %in% c("binary", "multiclass") && length(label) == length(rnd_idx)) {
447

448
      y <- label[rnd_idx]
449
      y <- as.factor(y)
450
      folds <- lgb.stratified.folds(y, nfold)
451

452
    } else {
453

454
455
      # Make simple non-stratified folds
      folds <- list()
456

457
      # Loop through each fold
458
      for (i in seq_len(nfold)) {
459
        kstep <- length(rnd_idx) %/% (nfold - i + 1L)
460
        folds[[i]] <- rnd_idx[seq_len(kstep)]
461
        rnd_idx <- rnd_idx[-seq_len(kstep)]
462
      }
463

464
    }
465

Guolin Ke's avatar
Guolin Ke committed
466
  } else {
467

468
469
    # When doing group, stratified is not possible (only random selection)
    if (nfold > length(group)) {
470
      stop("\nYou requested too many folds for the number of available groups.\n")
471
    }
472

473
    # Degroup the groups
474
    ungrouped <- inverse.rle(list(lengths = group, values = seq_along(group)))
475

476
    # Can't stratify, shuffle
477
    rnd_idx <- sample.int(length(group))
478

479
    # Make simple non-stratified folds
Guolin Ke's avatar
Guolin Ke committed
480
    folds <- list()
481

482
    # Loop through each fold
483
    for (i in seq_len(nfold)) {
484
      kstep <- length(rnd_idx) %/% (nfold - i + 1L)
485
486
487
488
      folds[[i]] <- list(
        fold = which(ungrouped %in% rnd_idx[seq_len(kstep)])
        , group = rnd_idx[seq_len(kstep)]
      )
489
      rnd_idx <- rnd_idx[-seq_len(kstep)]
Guolin Ke's avatar
Guolin Ke committed
490
    }
491

Guolin Ke's avatar
Guolin Ke committed
492
  }
493

494
  return(folds)
495

Guolin Ke's avatar
Guolin Ke committed
496
497
498
}

# Creates CV folds stratified by the values of y.
499
# It was borrowed from caret::createFolds and simplified
Guolin Ke's avatar
Guolin Ke committed
500
# by always returning an unnamed list of fold indices.
501
#' @importFrom stats quantile
502
lgb.stratified.folds <- function(y, k = 10L) {
503

504
505
506
507
508
509
510
511
  ## Group the numeric data based on their magnitudes
  ## and sample within those groups.
  ## When the number of samples is low, we may have
  ## issues further slicing the numeric data into
  ## groups. The number of groups will depend on the
  ## ratio of the number of folds to the sample size.
  ## At most, we will use quantiles. If the sample
  ## is too small, we just do regular unstratified CV
Guolin Ke's avatar
Guolin Ke committed
512
  if (is.numeric(y)) {
513

514
    cuts <- length(y) %/% k
515
516
    if (cuts < 2L) {
      cuts <- 2L
517
    }
518
519
    if (cuts > 5L) {
      cuts <- 5L
520
521
522
    }
    y <- cut(
      y
523
      , unique(stats::quantile(y, probs = seq.int(0.0, 1.0, length.out = cuts)))
524
525
      , include.lowest = TRUE
    )
526

Guolin Ke's avatar
Guolin Ke committed
527
  }
528

Guolin Ke's avatar
Guolin Ke committed
529
  if (k < length(y)) {
530

531
    ## Reset levels so that the possible levels and
Guolin Ke's avatar
Guolin Ke committed
532
    ## the levels in the vector are the same
533
    y <- as.factor(as.character(y))
Guolin Ke's avatar
Guolin Ke committed
534
535
    numInClass <- table(y)
    foldVector <- vector(mode = "integer", length(y))
536

Guolin Ke's avatar
Guolin Ke committed
537
538
539
    ## For each class, balance the fold allocation as far
    ## as possible, then resample the remainder.
    ## The final assignment of folds is also randomized.
540

541
    for (i in seq_along(numInClass)) {
542

543
      ## Create a vector of integers from 1:k as many times as possible without
Guolin Ke's avatar
Guolin Ke committed
544
545
      ## going over the number of samples in the class. Note that if the number
      ## of samples in a class is less than k, nothing is producd here.
546
      seqVector <- rep(seq_len(k), numInClass[i] %/% k)
547

548
      ## Add enough random integers to get  length(seqVector) == numInClass[i]
549
      if (numInClass[i] %% k > 0L) {
550
        seqVector <- c(seqVector, sample.int(k, numInClass[i] %% k))
551
      }
552

553
      ## Shuffle the integers for fold assignment and assign to this classes's data
554
      foldVector[y == dimnames(numInClass)$y[i]] <- sample(seqVector)
555

Guolin Ke's avatar
Guolin Ke committed
556
    }
557

Guolin Ke's avatar
Guolin Ke committed
558
  } else {
559

Guolin Ke's avatar
Guolin Ke committed
560
    foldVector <- seq(along = y)
561

Guolin Ke's avatar
Guolin Ke committed
562
  }
563

Guolin Ke's avatar
Guolin Ke committed
564
  out <- split(seq(along = y), foldVector)
565
566
  names(out) <- NULL
  out
Guolin Ke's avatar
Guolin Ke committed
567
568
}

569
lgb.merge.cv.result <- function(msg, showsd = TRUE) {
570

571
  # Get CV message length
572
  if (length(msg) == 0L) {
573
574
    stop("lgb.cv: size of cv result error")
  }
575

576
  # Get evaluation message length
577
  eval_len <- length(msg[[1L]])
578

579
  # Is evaluation message empty?
580
  if (eval_len == 0L) {
581
582
    stop("lgb.cv: should provide at least one metric for CV")
  }
583

584
  # Get evaluation results using a list apply
585
  eval_result <- lapply(seq_len(eval_len), function(j) {
586
587
    as.numeric(lapply(seq_along(msg), function(i) {
      msg[[i]][[j]]$value }))
Guolin Ke's avatar
Guolin Ke committed
588
  })
589

590
  # Get evaluation. Just taking the first element here to
591
  # get structure (name, higher_better, data_name)
592
  ret_eval <- msg[[1L]]
593

594
595
596
597
  # Go through evaluation length items
  for (j in seq_len(eval_len)) {
    ret_eval[[j]]$value <- mean(eval_result[[j]])
  }
598

Guolin Ke's avatar
Guolin Ke committed
599
  ret_eval_err <- NULL
600

601
  # Check for standard deviation
602
  if (showsd) {
603

604
    # Parse standard deviation
605
    for (j in seq_len(eval_len)) {
606
607
      ret_eval_err <- c(
        ret_eval_err
608
        , sqrt(mean(eval_result[[j]] ^ 2L) - mean(eval_result[[j]]) ^ 2L)
609
      )
Guolin Ke's avatar
Guolin Ke committed
610
    }
611

612
    # Convert to list
Guolin Ke's avatar
Guolin Ke committed
613
    ret_eval_err <- as.list(ret_eval_err)
614

Guolin Ke's avatar
Guolin Ke committed
615
  }
616

617
  # Return errors
618
619
620
621
  list(
    eval_list = ret_eval
    , eval_err_list = ret_eval_err
  )
622

623
}