lgb.cv.R 18.8 KB
Newer Older
James Lamb's avatar
James Lamb committed
1
2
#' @importFrom R6 R6Class
CVBooster <- R6::R6Class(
  classname = "lgb.CVBooster",
  cloneable = FALSE,
  public = list(
    # Best iteration across folds (-1L until lgb.cv() computes it)
    best_iter = -1L,
    # Score at the best iteration (NA until lgb.cv() computes it)
    best_score = NA,
    # Aggregated evaluation history, filled by the record-evaluation callback
    record_evals = list(),
    # One element per CV fold; each element is a list holding the fold's
    # Booster under the 'booster' key (see how lgb.cv() builds 'bst_folds')
    boosters = list(),

    # Store the per-fold boosters created by lgb.cv()
    initialize = function(x) {
      self$boosters <- x
    },

    # Push new parameters down to every fold's booster.
    # Fix: the original looped over the bare name 'boosters', which is not
    # visible inside an R6 method ('self$boosters' is required), and each
    # element wraps its Booster in a list, so the call must go through
    # the 'booster' key.
    reset_parameter = function(new_params) {
      for (x in self$boosters) {
        x$booster$reset_parameter(new_params)
      }
      self
    }
  )
)

20
#' @name lgb.cv
James Lamb's avatar
James Lamb committed
21
#' @title Main CV logic for LightGBM
James Lamb's avatar
James Lamb committed
22
23
#' @description Cross validation logic used by LightGBM
#' @inheritParams lgb_shared_params
24
#' @param nfold the original dataset is randomly partitioned into \code{nfold} equal size subsamples.
25
#' @param label Vector of labels, used if \code{data} is not an \code{\link{lgb.Dataset}}
26
#' @param weight vector of observation weights. If not NULL, will be set on the dataset
27
#' @param obj objective function, can be character or custom objective function. Examples include
28
29
#'            \code{regression}, \code{regression_l1}, \code{huber},
#'             \code{binary}, \code{lambdarank}, \code{multiclass}
Guolin Ke's avatar
Guolin Ke committed
30
#' @param eval evaluation function, can be (list of) character or custom eval function
31
#' @param record Boolean, TRUE will record iteration message to \code{booster$record_evals}
Guolin Ke's avatar
Guolin Ke committed
32
#' @param showsd \code{boolean}, whether to show standard deviation of cross validation
33
#' @param stratified a \code{boolean} indicating whether sampling of folds should be stratified
34
#'                   by the values of outcome labels.
Guolin Ke's avatar
Guolin Ke committed
35
#' @param folds \code{list} provides a possibility to use a list of pre-defined CV folds
36
37
#'              (each element must be a vector of test fold's indices). When folds are supplied,
#'              the \code{nfold} and \code{stratified} parameters are ignored.
Guolin Ke's avatar
Guolin Ke committed
38
#' @param colnames feature names, if not null, will use this to overwrite the names in dataset
39
40
41
#' @param categorical_feature categorical features. This can either be a character vector of feature
#'                            names or an integer vector with the indices of the features (e.g.
#'                            \code{c(1L, 10L)} to say "the first and tenth columns").
42
43
44
#' @param callbacks List of callback functions that are applied at each iteration.
#' @param reset_data Boolean, setting it to TRUE (not the default value) will transform the booster model
#'                   into a predictor model, which frees up memory and releases the original datasets
James Lamb's avatar
James Lamb committed
45
46
#' @param ... other parameters, see Parameters.rst for more information. A few key parameters:
#'            \itemize{
47
48
49
#'                \item{\code{boosting}: Boosting type. \code{"gbdt"}, \code{"rf"}, \code{"dart"} or \code{"goss"}.}
#'                \item{\code{num_leaves}: Maximum number of leaves in one tree.}
#'                \item{\code{max_depth}: Limit the max depth for tree model. This is used to deal with
James Lamb's avatar
James Lamb committed
50
#'                                 overfit when #data is small. The tree still grows leaf-wise.}
51
#'                \item{\code{num_threads}: Number of threads for LightGBM. For the best speed, set this to
52
#'                                   the number of real CPU cores, not the number of threads (most
James Lamb's avatar
James Lamb committed
53
54
#'                                   CPUs use hyper-threading to generate 2 threads per CPU core).}
#'            }
55
#'
56
#' @return a trained model \code{lgb.CVBooster}.
57
#'
Guolin Ke's avatar
Guolin Ke committed
58
#' @examples
59
#' \dontrun{
60
61
62
63
#' data(agaricus.train, package = "lightgbm")
#' train <- agaricus.train
#' dtrain <- lgb.Dataset(train$data, label = train$label)
#' params <- list(objective = "regression", metric = "l2")
64
65
66
#' model <- lgb.cv(
#'   params = params
#'   , data = dtrain
67
#'   , nrounds = 5L
68
69
70
#'   , nfold = 3L
#'   , min_data = 1L
#'   , learning_rate = 1.0
71
#' )
72
#' }
73
#' @importFrom data.table data.table setorderv
Guolin Ke's avatar
Guolin Ke committed
74
#' @export
75
76
77
78
79
80
81
82
83
84
85
lgb.cv <- function(params = list()
                   , data
                   , nrounds = 10L
                   , nfold = 3L
                   , label = NULL
                   , weight = NULL
                   , obj = NULL
                   , eval = NULL
                   , verbose = 1L
                   , record = TRUE
                   , eval_freq = 1L
                   , showsd = TRUE
                   , stratified = TRUE
                   , folds = NULL
                   , init_model = NULL
                   , colnames = NULL
                   , categorical_feature = NULL
                   , early_stopping_rounds = NULL
                   , callbacks = list()
                   , reset_data = FALSE
                   , ...
                   ) {

  # validate parameters
  if (nrounds <= 0L) {
    stop("nrounds should be greater than zero")
  }

  # If 'data' is not an lgb.Dataset, try to construct one using 'label'
  if (!lgb.is.Dataset(data)) {
    if (is.null(label)) {
      stop("'label' must be provided for lgb.cv if 'data' is not an 'lgb.Dataset'")
    }
    data <- lgb.Dataset(data, label = label)
  }

  # Merge keyword arguments passed through '...' into 'params'.
  # NOTE(review): append() does not de-duplicate names; later lookups
  # that take the first match (e.g. num_iterations aliases below)
  # therefore prefer entries already in 'params'.
  params <- append(params, list(...))
  params$verbose <- verbose
  params <- lgb.check.obj(params, obj)
  params <- lgb.check.eval(params, eval)
  fobj <- NULL
  feval <- NULL

  # A function-valued objective becomes a custom objective ('fobj');
  # the C++ side is told to use objective "NONE" in that case
  if (is.function(params$objective)) {
    fobj <- params$objective
    params$objective <- "NONE"
  }

  # A function-valued 'eval' becomes a custom metric ('feval')
  if (is.function(eval)) {
    feval <- eval
  }

  # Init predictor to empty
  predictor <- NULL

  # Check for boosting from a trained model: either a path/model string
  # or an existing Booster object
  if (is.character(init_model)) {
    predictor <- Predictor$new(init_model)
  } else if (lgb.is.Booster(init_model)) {
    predictor <- init_model$to_predictor()
  }

  # Set the iteration to start from / end to (and check for boosting from a trained model, again)
  begin_iteration <- 1L
  if (!is.null(predictor)) {
    begin_iteration <- predictor$current_iter() + 1L
  }
  # Check for number of rounds passed as a parameter - in case there are multiple aliases, take only the first one
  n_trees <- .PARAMETER_ALIASES()[["num_iterations"]]
  if (any(names(params) %in% n_trees)) {
    end_iteration <- begin_iteration + params[[which(names(params) %in% n_trees)[1L]]] - 1L
  } else {
    end_iteration <- begin_iteration + nrounds - 1L
  }

  # Check interaction constraints, resolving feature names from the
  # explicit 'colnames' argument or from the Dataset itself
  cnames <- NULL
  if (!is.null(colnames)) {
    cnames <- colnames
  } else if (!is.null(data$get_colnames())) {
    cnames <- data$get_colnames()
  }
  params[["interaction_constraints"]] <- lgb.check_interaction_constraints(params, cnames)

  # Check for weights
  if (!is.null(weight)) {
    data$setinfo("weight", weight)
  }

  # Update parameters with parsed parameters
  data$update_params(params)

  # Attach the predictor (if any) to the Dataset via its private API,
  # so continued training aligns bin mappers with the initial model
  data$.__enclos_env__$private$set_predictor(predictor)

  # Write column names
  if (!is.null(colnames)) {
    data$set_colnames(colnames)
  }

  # Write categorical features
  if (!is.null(categorical_feature)) {
    data$set_categorical_feature(categorical_feature)
  }

  # Construct datasets, if needed
  data$construct()

  # Check for folds: user-supplied folds take precedence over
  # 'nfold'/'stratified'
  if (!is.null(folds)) {

    # Check for list of folds or for single value
    if (!identical(class(folds), "list") || length(folds) < 2L) {
      stop(sQuote("folds"), " must be a list with 2 or more elements that are vectors of indices for each CV-fold")
    }

    # Set number of folds
    nfold <- length(folds)

  } else {

    # Check fold value
    if (nfold <= 1L) {
      stop(sQuote("nfold"), " must be > 1")
    }

    # Create folds
    folds <- generate.cv.folds(
      nfold = nfold
      , nrows = nrow(data)
      , stratified = stratified
      , label = getinfo(data, "label")
      , group = getinfo(data, "group")
      , params = params
    )

  }

  # Add printing log callback
  if (verbose > 0L && eval_freq > 0L) {
    callbacks <- add.cb(callbacks, cb.print.evaluation(eval_freq))
  }

  # Add evaluation log callback
  if (record) {
    callbacks <- add.cb(callbacks, cb.record.evaluation())
  }

  # If early stopping was passed as a parameter in 'params', prefer that to keyword argument
  # early_stopping_rounds by overwriting the value in 'early_stopping_rounds'
  early_stop <- .PARAMETER_ALIASES()[["early_stopping_round"]]
  early_stop_param_indx <- names(params) %in% early_stop
  if (any(early_stop_param_indx)) {
    first_early_stop_param <- which(early_stop_param_indx)[[1L]]
    first_early_stop_param_name <- names(params)[[first_early_stop_param]]
    early_stopping_rounds <- params[[first_early_stop_param_name]]
  }

  # Did user pass parameters that indicate they want to use early stopping?
  using_early_stopping_via_args <- !is.null(early_stopping_rounds) && early_stopping_rounds > 0L

  # Detect 'dart' boosting under any of its parameter aliases
  boosting_param_names <- .PARAMETER_ALIASES()[["boosting"]]
  using_dart <- any(
    sapply(
      X = boosting_param_names
      , FUN = function(param) {
        identical(params[[param]], "dart")
      }
    )
  )

  # Cannot use early stopping with 'dart' boosting
  if (using_dart) {
    warning("Early stopping is not available in 'dart' mode.")
    using_early_stopping_via_args <- FALSE

    # Remove the cb.early.stop() function if it was passed in to callbacks
    callbacks <- Filter(
      f = function(cb_func) {
        !identical(attr(cb_func, "name"), "cb.early.stop")
      }
      , x = callbacks
    )
  }

  # If user supplied early_stopping_rounds, add the early stopping callback
  if (using_early_stopping_via_args) {
    callbacks <- add.cb(
      callbacks
      , cb.early.stop(
        stopping_rounds = early_stopping_rounds
        , verbose = verbose
      )
    )
  }

  # Categorize callbacks into pre- and post-iteration groups
  cb <- categorize.callbacks(callbacks)

  # Construct booster for each fold. The data.table() code below is used to
  # guarantee that indices are sorted while keeping init_score and weight together
  # with the correct indices. Note that it takes advantage of the fact that
  # someDT$some_column returns NULL if 'some_column' does not exist in the data.table
  bst_folds <- lapply(
    X = seq_along(folds)
    , FUN = function(k) {

      # For learning-to-rank, each fold is a named list with two elements:
      #   * `fold` = an integer vector of row indices
      #   * `group` = an integer vector describing which groups are in the fold
      # For classification or regression tasks, it will just be an integer
      # vector of row indices
      folds_have_group <- "group" %in% names(folds[[k]])
      if (folds_have_group) {
        test_indices <- folds[[k]]$fold
        test_group_indices <- folds[[k]]$group
        test_groups <- getinfo(data, "group")[test_group_indices]
        train_groups <- getinfo(data, "group")[-test_group_indices]
      } else {
        test_indices <- folds[[k]]
      }
      train_indices <- seq_len(nrow(data))[-test_indices]

      # set up test set
      indexDT <- data.table::data.table(
        indices = test_indices
        , weight = getinfo(data, "weight")[test_indices]
        , init_score = getinfo(data, "init_score")[test_indices]
      )
      data.table::setorderv(indexDT, "indices", order = 1L)
      dtest <- slice(data, indexDT$indices)
      setinfo(dtest, "weight", indexDT$weight)
      setinfo(dtest, "init_score", indexDT$init_score)

      # set up training set
      indexDT <- data.table::data.table(
        indices = train_indices
        , weight = getinfo(data, "weight")[train_indices]
        , init_score = getinfo(data, "init_score")[train_indices]
      )
      data.table::setorderv(indexDT, "indices", order = 1L)
      dtrain <- slice(data, indexDT$indices)
      setinfo(dtrain, "weight", indexDT$weight)
      setinfo(dtrain, "init_score", indexDT$init_score)

      if (folds_have_group) {
        setinfo(dtest, "group", test_groups)
        setinfo(dtrain, "group", train_groups)
      }

      # each fold trains on its own slice and validates on the held-out slice
      booster <- Booster$new(params, dtrain)
      booster$add_valid(dtest, "valid")
      return(
        list(booster = booster)
      )
    }
  )

  # Create new booster wrapping all per-fold boosters
  cv_booster <- CVBooster$new(bst_folds)

  # Callback env shared by all callbacks during training
  env <- CB_ENV$new()
  env$model <- cv_booster
  env$begin_iteration <- begin_iteration
  env$end_iteration <- end_iteration

  # Start training model using number of iterations to start and end with
  for (i in seq.int(from = begin_iteration, to = end_iteration)) {

    # Overwrite iteration in environment
    env$iteration <- i
    env$eval_list <- list()

    # Loop through "pre_iter" element
    for (f in cb$pre_iter) {
      f(env)
    }

    # Update one boosting iteration on every fold, collecting each
    # fold's validation metrics
    msg <- lapply(cv_booster$boosters, function(fd) {
      fd$booster$update(fobj = fobj)
      fd$booster$eval_valid(feval = feval)
    })

    # Average the per-fold evaluation results
    merged_msg <- lgb.merge.cv.result(msg)

    # Write evaluation result in environment
    env$eval_list <- merged_msg$eval_list

    # Check for standard deviation requirement
    if (showsd) {
      env$eval_err_list <- merged_msg$eval_err_list
    }

    # Loop through "post_iter" callbacks (logging, recording, early stopping)
    for (f in cb$post_iter) {
      f(env)
    }

    # Check for early stopping and break if needed
    if (env$met_early_stop) break

  }

  # When early stopping is not activated, we compute the best iteration / score ourselves
  # based on the first metric
  if (record && is.na(env$best_score)) {
    first_metric <- cv_booster$boosters[[1L]][[1L]]$.__enclos_env__$private$eval_names[1L]
    .find_best <- which.min
    if (isTRUE(env$eval_list[[1L]]$higher_better[1L])) {
      .find_best <- which.max
    }
    cv_booster$best_iter <- unname(
      .find_best(
        unlist(
          cv_booster$record_evals[["valid"]][[first_metric]][[.EVAL_KEY()]]
        )
      )
    )
    cv_booster$best_score <- cv_booster$record_evals[["valid"]][[first_metric]][[.EVAL_KEY()]][[cv_booster$best_iter]]
  }

  # Optionally replace each fold's booster by a lightweight reload of
  # itself, dropping references to the training data to free memory
  if (reset_data) {
    lapply(cv_booster$boosters, function(fd) {
      # Store temporarily model data elsewhere
      booster_old <- list(
        best_iter = fd$booster$best_iter
        , best_score = fd$booster$best_score
        , record_evals = fd$booster$record_evals
      )
      # Reload model from its serialized string, then restore metadata
      fd$booster <- lgb.load(model_str = fd$booster$save_model_to_string())
      fd$booster$best_iter <- booster_old$best_iter
      fd$booster$best_score <- booster_old$best_score
      fd$booster$record_evals <- booster_old$record_evals
    })
  }

  # Return booster
  return(cv_booster)
}

# Generates random (stratified if needed) CV folds
generate.cv.folds <- function(nfold, nrows, stratified, label, group, params) {

  if (is.null(group)) {

    # No query/group information: work on shuffled row indices
    shuffled_rows <- sample.int(nrows)

    # Stratify only for classification objectives, and only when a
    # label of matching length is available
    if (isTRUE(stratified) && params$objective %in% c("binary", "multiclass") && length(label) == length(shuffled_rows)) {

      strat_y <- factor(label[shuffled_rows])
      out_folds <- lgb.stratified.folds(strat_y, nfold)

    } else {

      # Plain folds: repeatedly peel a chunk off the shuffled indices.
      # Chunk sizes stay balanced by dividing what remains by the
      # number of folds still to fill.
      out_folds <- vector(mode = "list", length = nfold)
      for (fold_id in seq_len(nfold)) {
        chunk_len <- length(shuffled_rows) %/% (nfold - fold_id + 1L)
        out_folds[[fold_id]] <- shuffled_rows[seq_len(chunk_len)]
        shuffled_rows <- shuffled_rows[-seq_len(chunk_len)]
      }

    }

  } else {

    # With grouped rows (e.g. learning-to-rank), whole groups are
    # assigned to folds; stratification is not possible
    if (nfold > length(group)) {
      stop("\n\tYou requested too many folds for the number of available groups.\n")
    }

    # One group id per row, expanded from the per-group sizes
    row_group_ids <- inverse.rle(list(lengths = group, values = seq_along(group)))

    # Shuffle the group ids, then peel off balanced chunks of groups;
    # each fold records both its row indices and its group indices
    shuffled_groups <- sample.int(length(group))

    out_folds <- vector(mode = "list", length = nfold)
    for (fold_id in seq_len(nfold)) {
      chunk_len <- length(shuffled_groups) %/% (nfold - fold_id + 1L)
      group_chunk <- shuffled_groups[seq_len(chunk_len)]
      out_folds[[fold_id]] <- list(
        fold = which(row_group_ids %in% group_chunk)
        , group = group_chunk
      )
      shuffled_groups <- shuffled_groups[-seq_len(chunk_len)]
    }

  }

  return(out_folds)
}

# Creates CV folds stratified by the values of y.
# It was borrowed from caret::lgb.stratified.folds and simplified
# by always returning an unnamed list of fold indices.
#' @importFrom stats quantile
lgb.stratified.folds <- function(y, k = 10L) {

  ## For numeric y, bin the values by quantile so that sampling
  ## happens within groups of similar magnitude. The number of
  ## bins is bounded: at least 2, at most 5, and it shrinks when
  ## the sample is small relative to the number of folds. If the
  ## sample is too small this effectively degrades to plain
  ## unstratified CV.
  if (is.numeric(y)) {
    cuts <- length(y) %/% k
    if (cuts < 2L) {
      cuts <- 2L
    }
    if (cuts > 5L) {
      cuts <- 5L
    }
    y <- cut(
      y
      , unique(stats::quantile(y, probs = seq.int(0.0, 1.0, length.out = cuts)))
      , include.lowest = TRUE
    )
  }

  if (k < length(y)) {

    ## Drop unused factor levels so tabulation only covers levels
    ## actually present in the data
    y <- factor(as.character(y))
    class_counts <- table(y)
    fold_ids <- vector(mode = "integer", length(y))

    ## Within each class, allocate folds as evenly as possible;
    ## leftovers go to randomly chosen folds, and the final
    ## per-class assignment is shuffled.
    for (cls in seq_along(class_counts)) {

      ## Full cycles of 1:k that fit in this class's count
      ## (empty when the class has fewer than k members)
      assignment <- rep(seq_len(k), class_counts[cls] %/% k)

      ## Top up with random fold ids until the assignment length
      ## matches the class count
      leftover <- class_counts[cls] %% k
      if (leftover > 0L) {
        assignment <- c(assignment, sample.int(k, leftover))
      }

      ## Shuffle and write the assignment into this class's positions
      fold_ids[y == dimnames(class_counts)$y[cls]] <- sample(assignment)

    }

  } else {

    ## Fewer observations than folds: each one gets its own fold
    fold_ids <- seq(along = y)

  }

  ## Split row indices by their fold id, returned as an unnamed list
  fold_list <- split(seq(along = y), fold_ids)
  names(fold_list) <- NULL
  return(fold_list)
}

558
# Merge per-fold evaluation results into cross-fold means and,
# optionally, their (population) standard deviations.
lgb.merge.cv.result <- function(msg, showsd = TRUE) {

  # One entry per fold is required
  if (length(msg) == 0L) {
    stop("lgb.cv: size of cv result error")
  }

  # Number of metrics, taken from the first fold
  eval_len <- length(msg[[1L]])
  if (eval_len == 0L) {
    stop("lgb.cv: should provide at least one metric for CV")
  }

  # For each metric, gather its value from every fold as a numeric vector
  per_metric_values <- lapply(seq_len(eval_len), function(metric_idx) {
    vapply(
      seq_along(msg)
      , function(fold_idx) {
        as.numeric(msg[[fold_idx]][[metric_idx]]$value)
      }
      , numeric(1L)
    )
  })

  # Reuse the first fold's records, replacing each metric's value
  # with the mean across folds
  merged <- msg[[1L]]
  for (metric_idx in seq_len(eval_len)) {
    merged[[metric_idx]]$value <- mean(per_metric_values[[metric_idx]])
  }

  # Population standard deviation per metric, when requested:
  # sqrt(E[x^2] - E[x]^2)
  err_list <- NULL
  if (showsd) {
    err_list <- as.list(vapply(
      per_metric_values
      , function(vals) {
        sqrt(mean(vals ^ 2L) - mean(vals) ^ 2L)
      }
      , numeric(1L)
    ))
  }

  list(
    eval_list = merged
    , eval_err_list = err_list
  )
}