# coding: utf-8
# pylint: disable = invalid-name, W0105
"""Training Library containing training routines of LightGBM."""
from __future__ import absolute_import

import collections
import copy
import warnings
from operator import attrgetter

import numpy as np

from . import callback
from .basic import Booster, Dataset, LightGBMError, _InnerPredictor
from .compat import (SKLEARN_INSTALLED, _LGBMGroupKFold, _LGBMStratifiedKFold,
                     string_type, integer_types, range_, zip_)


def train(params, train_set, num_boost_round=100,
          valid_sets=None, valid_names=None,
          fobj=None, feval=None, init_model=None,
          feature_name='auto', categorical_feature='auto',
          early_stopping_rounds=None, evals_result=None,
          verbose_eval=True, learning_rates=None,
          keep_training_booster=False, callbacks=None):
    """Perform the training with given parameters.

    Parameters
    ----------
    params : dict
        Parameters for training.
    train_set : Dataset
        Data to be trained on.
    num_boost_round: int, optional (default=100)
        Number of boosting iterations.
    valid_sets: list of Datasets or None, optional (default=None)
        List of data to be evaluated during training.
    valid_names: list of string or None, optional (default=None)
        Names of ``valid_sets``.
    fobj : callable or None, optional (default=None)
        Customized objective function.
    feval : callable or None, optional (default=None)
        Customized evaluation function.
        Should accept two parameters: preds, train_data.
        For multi-class task, the preds are grouped by class_id first, then grouped by row_id.
        If you want to get i-th row preds in j-th class, the access way is preds[j * num_data + i].
        Note: should return (eval_name, eval_result, is_higher_better) or list of such tuples.
        To ignore the default metric corresponding to the used objective,
        set the ``metric`` parameter to the string ``"None"`` in ``params``.
    init_model : string, Booster or None, optional (default=None)
        Filename of LightGBM model or Booster instance used to continue training.
    feature_name : list of strings or 'auto', optional (default="auto")
        Feature names.
        If 'auto' and data is pandas DataFrame, data columns names are used.
    categorical_feature : list of strings or int, or 'auto', optional (default="auto")
        Categorical features.
        If list of int, interpreted as indices.
        If list of strings, interpreted as feature names (need to specify ``feature_name`` as well).
        If 'auto' and data is pandas DataFrame, pandas categorical columns are used.
        All values in categorical features should be less than int32 max value (2147483647).
        Large values could be memory consuming. Consider using consecutive integers starting from zero.
        All negative values in categorical features will be treated as missing values.
    early_stopping_rounds: int or None, optional (default=None)
        Activates early stopping. The model will train until the validation score stops improving.
        Validation score needs to improve at least every ``early_stopping_rounds`` round(s)
        to continue training.
        Requires at least one validation data and one metric.
        If there's more than one, will check all of them. But the training data is ignored anyway.
        If early stopping occurs, the model will add a ``best_iteration`` field.
    evals_result: dict or None, optional (default=None)
        Dictionary used to store all evaluation results of all the items in ``valid_sets``.

        Example
        -------
        With a ``valid_sets`` = [valid_set, train_set],
        ``valid_names`` = ['eval', 'train']
        and a ``params`` = {'metric': 'logloss'}
        returns: {'train': {'logloss': ['0.48253', '0.35953', ...]},
        'eval': {'logloss': ['0.480385', '0.357756', ...]}}.
    verbose_eval : bool or int, optional (default=True)
        Requires at least one validation data.
        If True, the eval metric on the valid set is printed at each boosting stage.
        If int, the eval metric on the valid set is printed at every ``verbose_eval`` boosting stage.
        The last boosting stage or the boosting stage found by using ``early_stopping_rounds`` is also printed.

        Example
        -------
        With ``verbose_eval`` = 4 and at least one item in evals,
        an evaluation metric is printed every 4 (instead of 1) boosting stages.
    learning_rates: list, callable or None, optional (default=None)
        List of learning rates for each boosting round
        or a customized function that calculates ``learning_rate``
        in terms of the current number of rounds (e.g. to apply learning rate decay).
    keep_training_booster : bool, optional (default=False)
        Whether the returned Booster will be used to keep training.
        If False, the returned value will be converted into _InnerPredictor before returning.
        You can still use _InnerPredictor as ``init_model`` to continue training in the future.
    callbacks : list of callables or None, optional (default=None)
        List of callback functions that are applied at each iteration.
        See Callbacks in Python API for more information.

    Returns
    -------
    booster : Booster
        The trained Booster model.
    """
    # create predictor first
    params = copy.deepcopy(params)
    for alias in ["num_iterations", "num_iteration", "n_iter", "num_tree", "num_trees",
                  "num_round", "num_rounds", "num_boost_round", "n_estimators"]:
        if alias in params:
            num_boost_round = int(params.pop(alias))
            warnings.warn("Found `{}` in params. Will use it instead of argument".format(alias))
            break
    for alias in ["early_stopping_round", "early_stopping_rounds", "early_stopping"]:
        if alias in params and params[alias] is not None:
            early_stopping_rounds = int(params.pop(alias))
            warnings.warn("Found `{}` in params. Will use it instead of argument".format(alias))
            break

    if num_boost_round <= 0:
        raise ValueError("num_boost_round should be greater than zero.")
    if isinstance(init_model, string_type):
        predictor = _InnerPredictor(model_file=init_model, pred_parameter=params)
    elif isinstance(init_model, Booster):
        predictor = init_model._to_predictor(dict(init_model.params, **params))
    else:
        predictor = None
    init_iteration = predictor.num_total_iteration if predictor is not None else 0
    # check dataset
    if not isinstance(train_set, Dataset):
        raise TypeError("Training only accepts Dataset object")

    train_set._update_params(params) \
             ._set_predictor(predictor) \
             .set_feature_name(feature_name) \
             .set_categorical_feature(categorical_feature)

    is_valid_contain_train = False
    train_data_name = "training"
    reduced_valid_sets = []
    name_valid_sets = []
    if valid_sets is not None:
        if isinstance(valid_sets, Dataset):
            valid_sets = [valid_sets]
        if isinstance(valid_names, string_type):
            valid_names = [valid_names]
        for i, valid_data in enumerate(valid_sets):
            # reduce the cost of predicting on the training data
            if valid_data is train_set:
                is_valid_contain_train = True
                if valid_names is not None:
                    train_data_name = valid_names[i]
                continue
            if not isinstance(valid_data, Dataset):
                raise TypeError("Training only accepts Dataset object")
            reduced_valid_sets.append(valid_data._update_params(params).set_reference(train_set))
            if valid_names is not None and len(valid_names) > i:
                name_valid_sets.append(valid_names[i])
            else:
                name_valid_sets.append('valid_' + str(i))
    # process callbacks
    if callbacks is None:
        callbacks = set()
    else:
        for i, cb in enumerate(callbacks):
            cb.__dict__.setdefault('order', i - len(callbacks))
        callbacks = set(callbacks)

    # Most of the legacy advanced options become callbacks
    if verbose_eval is True:
        callbacks.add(callback.print_evaluation())
    elif isinstance(verbose_eval, integer_types):
        callbacks.add(callback.print_evaluation(verbose_eval))

    if early_stopping_rounds is not None:
        callbacks.add(callback.early_stopping(early_stopping_rounds, verbose=bool(verbose_eval)))

    if learning_rates is not None:
        callbacks.add(callback.reset_parameter(learning_rate=learning_rates))

    if evals_result is not None:
        callbacks.add(callback.record_evaluation(evals_result))

    callbacks_before_iter = {cb for cb in callbacks if getattr(cb, 'before_iteration', False)}
    callbacks_after_iter = callbacks - callbacks_before_iter
    callbacks_before_iter = sorted(callbacks_before_iter, key=attrgetter('order'))
    callbacks_after_iter = sorted(callbacks_after_iter, key=attrgetter('order'))

    # construct booster
    try:
        booster = Booster(params=params, train_set=train_set)
        if is_valid_contain_train:
            booster.set_train_data_name(train_data_name)
        for valid_set, name_valid_set in zip_(reduced_valid_sets, name_valid_sets):
            booster.add_valid(valid_set, name_valid_set)
    finally:
        train_set._reverse_update_params()
        for valid_set in reduced_valid_sets:
            valid_set._reverse_update_params()
    booster.best_iteration = 0

    # start training
    for i in range_(init_iteration, init_iteration + num_boost_round):
        for cb in callbacks_before_iter:
            cb(callback.CallbackEnv(model=booster,
                                    params=params,
                                    iteration=i,
                                    begin_iteration=init_iteration,
                                    end_iteration=init_iteration + num_boost_round,
                                    evaluation_result_list=None))

        booster.update(fobj=fobj)

        evaluation_result_list = []
        # check evaluation result.
        if valid_sets is not None:
            if is_valid_contain_train:
                evaluation_result_list.extend(booster.eval_train(feval))
            evaluation_result_list.extend(booster.eval_valid(feval))
        try:
            for cb in callbacks_after_iter:
                cb(callback.CallbackEnv(model=booster,
                                        params=params,
                                        iteration=i,
                                        begin_iteration=init_iteration,
                                        end_iteration=init_iteration + num_boost_round,
                                        evaluation_result_list=evaluation_result_list))
        except callback.EarlyStopException as earlyStopException:
            booster.best_iteration = earlyStopException.best_iteration + 1
            evaluation_result_list = earlyStopException.best_score
            break
    booster.best_score = collections.defaultdict(dict)
    for dataset_name, eval_name, score, _ in evaluation_result_list:
        booster.best_score[dataset_name][eval_name] = score
    if not keep_training_booster:
        booster.model_from_string(booster.model_to_string(), False).free_dataset()
    return booster
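# A minimal usage sketch for ``train`` (illustrative only; the data and variable names
# below are hypothetical and not part of this module):
#
#     import numpy as np
#     import lightgbm as lgb
#     X, y = np.random.rand(500, 10), np.random.randint(2, size=500)
#     dtrain = lgb.Dataset(X[:400], label=y[:400])
#     dvalid = lgb.Dataset(X[400:], label=y[400:], reference=dtrain)
#     evals_result = {}
#     bst = lgb.train({'objective': 'binary', 'metric': 'binary_logloss'},
#                     dtrain,
#                     num_boost_round=50,
#                     valid_sets=[dvalid],
#                     early_stopping_rounds=5,
#                     evals_result=evals_result)
#     # evals_result now holds {'valid_0': {'binary_logloss': [...]}} and
#     # bst.best_iteration is set if early stopping occurred.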


class CVBooster(object):
    """Auxiliary data struct to hold all boosters of CV."""
    def __init__(self):
        self.boosters = []
        self.best_iteration = -1

    def append(self, booster):
        """Add a booster to CVBooster."""
        self.boosters.append(booster)

    def __getattr__(self, name):
        """Redirect method calls to all boosters."""
        def handlerFunction(*args, **kwargs):
            """Call the method of each booster and return their results as a list."""
            ret = []
            for booster in self.boosters:
                ret.append(getattr(booster, name)(*args, **kwargs))
            return ret
        return handlerFunction
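# Note on the delegation above (a sketch, not additional API): any Booster method called
# on a CVBooster is forwarded to every per-fold booster and the results are collected
# into a list, e.g.
#
#     cvbooster.current_iteration()   # -> [iteration_of_fold_0, iteration_of_fold_1, ...]
#     cvbooster.eval_valid(feval)     # -> list of per-fold evaluation results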


def _make_n_folds(full_data, folds, nfold, params, seed, fpreproc=None, stratified=True, shuffle=True):
    """
    Make an n-fold list of Booster from random indices.
    """
    full_data = full_data.construct()
    num_data = full_data.num_data()
    if folds is not None:
        if not hasattr(folds, '__iter__') and not hasattr(folds, 'split'):
            raise AttributeError("folds should be a generator or iterator of (train_idx, test_idx) tuples "
                                 "or scikit-learn splitter object with split method")
        if hasattr(folds, 'split'):
            group_info = full_data.get_group()
            if group_info is not None:
                group_info = group_info.astype(int)
                flatted_group = np.repeat(range_(len(group_info)), repeats=group_info)
            else:
                flatted_group = np.zeros(num_data, dtype=int)
            folds = folds.split(X=np.zeros(num_data), y=full_data.get_label(), groups=flatted_group)
    else:
        if 'objective' in params and params['objective'] == 'lambdarank':
            if not SKLEARN_INSTALLED:
                raise LightGBMError('Scikit-learn is required for lambdarank cv.')
            # lambdarank task, split according to groups
            group_info = full_data.get_group().astype(int)
            flatted_group = np.repeat(range_(len(group_info)), repeats=group_info)
            group_kfold = _LGBMGroupKFold(n_splits=nfold)
            folds = group_kfold.split(X=np.zeros(num_data), groups=flatted_group)
        elif stratified:
            if not SKLEARN_INSTALLED:
                raise LightGBMError('Scikit-learn is required for stratified cv.')
            skf = _LGBMStratifiedKFold(n_splits=nfold, shuffle=shuffle, random_state=seed)
            folds = skf.split(X=np.zeros(num_data), y=full_data.get_label())
        else:
            if shuffle:
                randidx = np.random.RandomState(seed).permutation(num_data)
            else:
                randidx = np.arange(num_data)
            kstep = int(num_data / nfold)
            test_id = [randidx[i: i + kstep] for i in range_(0, num_data, kstep)]
            train_id = [np.concatenate([test_id[i] for i in range_(nfold) if k != i]) for k in range_(nfold)]
            folds = zip_(train_id, test_id)

    ret = CVBooster()
    for train_idx, test_idx in folds:
        train_set = full_data.subset(train_idx)
        valid_set = full_data.subset(test_idx)
        # run preprocessing on the data set if needed
        if fpreproc is not None:
            train_set, valid_set, tparam = fpreproc(train_set, valid_set, params.copy())
        else:
            tparam = params
        cvbooster = Booster(tparam, train_set)
        cvbooster.add_valid(valid_set, 'valid')
        ret.append(cvbooster)
    return ret
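# Sketch of how a scikit-learn splitter reaches the ``folds`` branch above (assumes
# scikit-learn is installed; ``X`` and ``y`` are hypothetical arrays):
#
#     from sklearn.model_selection import KFold
#     eval_hist = cv(params, Dataset(X, label=y), folds=KFold(n_splits=5))
#
# Because KFold has a ``split`` method, _make_n_folds calls
# ``folds.split(X=np.zeros(num_data), y=..., groups=...)`` to generate the index pairs.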


def _agg_cv_result(raw_results):
    """
    Aggregate cross-validation results.
    """
    cvmap = collections.defaultdict(list)
    metric_type = {}
    for one_result in raw_results:
        for one_line in one_result:
            metric_type[one_line[1]] = one_line[3]
            cvmap[one_line[1]].append(one_line[2])
    return [('cv_agg', k, np.mean(v), metric_type[k], np.std(v)) for k, v in cvmap.items()]
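# For example, if two folds each report ('valid', 'auc', value, True), the aggregated
# output is [('cv_agg', 'auc', mean_of_values, True, std_of_values)]; ``cv`` then turns
# this into the 'auc-mean' / 'auc-stdv' entries of its result dict.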


def cv(params, train_set, num_boost_round=100,
       folds=None, nfold=5, stratified=True, shuffle=True,
       metrics=None, fobj=None, feval=None, init_model=None,
       feature_name='auto', categorical_feature='auto',
       early_stopping_rounds=None, fpreproc=None,
       verbose_eval=None, show_stdv=True, seed=0,
       callbacks=None):
    """Perform the cross-validation with given parameters.

    Parameters
    ----------
    params : dict
        Parameters for Booster.
    train_set : Dataset
        Data to be trained on.
    num_boost_round : int, optional (default=100)
        Number of boosting iterations.
    folds : generator or iterator of (train_idx, test_idx) tuples, scikit-learn splitter object or None, optional (default=None)
        If generator or iterator, it should yield the train and test indices for each fold.
        If object, it should be one of the scikit-learn splitter classes
        (http://scikit-learn.org/stable/modules/classes.html#splitter-classes)
        and have ``split`` method.
        This argument has the highest priority over other data split arguments.
    nfold : int, optional (default=5)
        Number of folds in CV.
    stratified : bool, optional (default=True)
        Whether to perform stratified sampling.
    shuffle: bool, optional (default=True)
        Whether to shuffle before splitting data.
    metrics : string, list of strings or None, optional (default=None)
        Evaluation metrics to be monitored during CV.
        If not None, the metric in ``params`` will be overridden.
    fobj : callable or None, optional (default=None)
        Custom objective function.
    feval : callable or None, optional (default=None)
        Customized evaluation function.
        Should accept two parameters: preds, train_data.
        For multi-class task, the preds are grouped by class_id first, then grouped by row_id.
        If you want to get i-th row preds in j-th class, the access way is preds[j * num_data + i].
        Note: should return (eval_name, eval_result, is_higher_better) or list of such tuples.
        To ignore the default metric corresponding to the used objective,
        set ``metrics`` to the string ``"None"``.
    init_model : string, Booster or None, optional (default=None)
        Filename of LightGBM model or Booster instance used to continue training.
    feature_name : list of strings or 'auto', optional (default="auto")
        Feature names.
        If 'auto' and data is pandas DataFrame, data columns names are used.
    categorical_feature : list of strings or int, or 'auto', optional (default="auto")
        Categorical features.
        If list of int, interpreted as indices.
        If list of strings, interpreted as feature names (need to specify ``feature_name`` as well).
        If 'auto' and data is pandas DataFrame, pandas categorical columns are used.
        All values in categorical features should be less than int32 max value (2147483647).
        Large values could be memory consuming. Consider using consecutive integers starting from zero.
        All negative values in categorical features will be treated as missing values.
    early_stopping_rounds: int or None, optional (default=None)
        Activates early stopping.
        CV score needs to improve at least every ``early_stopping_rounds`` round(s)
        to continue.
        Requires at least one metric. If there's more than one, will check all of them.
        The last entry in the evaluation history is the one from the best iteration.
    fpreproc : callable or None, optional (default=None)
        Preprocessing function that takes (dtrain, dtest, params)
        and returns transformed versions of those.
    verbose_eval : bool, int, or None, optional (default=None)
        Whether to display the progress.
        If None, progress will be displayed when np.ndarray is returned.
        If True, progress will be displayed at every boosting stage.
        If int, progress will be displayed at every given ``verbose_eval`` boosting stage.
    show_stdv : bool, optional (default=True)
        Whether to display the standard deviation in progress.
        Results are not affected by this parameter, and always contain std.
    seed : int, optional (default=0)
        Seed used to generate the folds (passed to numpy.random.seed).
    callbacks : list of callables or None, optional (default=None)
        List of callback functions that are applied at each iteration.
        See Callbacks in Python API for more information.

    Returns
    -------
    eval_hist : dict
        Evaluation history.
        The dictionary has the following format:
        {'metric1-mean': [values], 'metric1-stdv': [values],
        'metric2-mean': [values], 'metric2-stdv': [values],
        ...}.
    """
    if not isinstance(train_set, Dataset):
        raise TypeError("Training only accepts Dataset object")

    params = copy.deepcopy(params)
    for alias in ["num_iterations", "num_iteration", "n_iter", "num_tree", "num_trees",
                  "num_round", "num_rounds", "num_boost_round", "n_estimators"]:
        if alias in params:
            warnings.warn("Found `{}` in params. Will use it instead of argument".format(alias))
            num_boost_round = params.pop(alias)
            break
    for alias in ["early_stopping_round", "early_stopping_rounds", "early_stopping"]:
        if alias in params:
            warnings.warn("Found `{}` in params. Will use it instead of argument".format(alias))
            early_stopping_rounds = params.pop(alias)
            break

    if num_boost_round <= 0:
        raise ValueError("num_boost_round should be greater than zero.")
    if isinstance(init_model, string_type):
        predictor = _InnerPredictor(model_file=init_model, pred_parameter=params)
    elif isinstance(init_model, Booster):
        predictor = init_model._to_predictor(dict(init_model.params, **params))
    else:
        predictor = None
    train_set._update_params(params) \
             ._set_predictor(predictor) \
             .set_feature_name(feature_name) \
             .set_categorical_feature(categorical_feature)

    if metrics is not None:
        params['metric'] = metrics

    results = collections.defaultdict(list)
    cvfolds = _make_n_folds(train_set, folds=folds, nfold=nfold,
                            params=params, seed=seed, fpreproc=fpreproc,
                            stratified=stratified, shuffle=shuffle)

    # setup callbacks
    if callbacks is None:
        callbacks = set()
    else:
        for i, cb in enumerate(callbacks):
            cb.__dict__.setdefault('order', i - len(callbacks))
        callbacks = set(callbacks)
    if early_stopping_rounds is not None:
        callbacks.add(callback.early_stopping(early_stopping_rounds, verbose=False))
    if verbose_eval is True:
        callbacks.add(callback.print_evaluation(show_stdv=show_stdv))
    elif isinstance(verbose_eval, integer_types):
        callbacks.add(callback.print_evaluation(verbose_eval, show_stdv=show_stdv))

    callbacks_before_iter = {cb for cb in callbacks if getattr(cb, 'before_iteration', False)}
    callbacks_after_iter = callbacks - callbacks_before_iter
    callbacks_before_iter = sorted(callbacks_before_iter, key=attrgetter('order'))
    callbacks_after_iter = sorted(callbacks_after_iter, key=attrgetter('order'))

    for i in range_(num_boost_round):
        for cb in callbacks_before_iter:
            cb(callback.CallbackEnv(model=cvfolds,
                                    params=params,
                                    iteration=i,
                                    begin_iteration=0,
                                    end_iteration=num_boost_round,
                                    evaluation_result_list=None))
        cvfolds.update(fobj=fobj)
        res = _agg_cv_result(cvfolds.eval_valid(feval))
        for _, key, mean, _, std in res:
            results[key + '-mean'].append(mean)
            results[key + '-stdv'].append(std)
        try:
            for cb in callbacks_after_iter:
                cb(callback.CallbackEnv(model=cvfolds,
                                        params=params,
                                        iteration=i,
                                        begin_iteration=0,
                                        end_iteration=num_boost_round,
                                        evaluation_result_list=res))
        except callback.EarlyStopException as earlyStopException:
            cvfolds.best_iteration = earlyStopException.best_iteration + 1
            for k in results:
                results[k] = results[k][:cvfolds.best_iteration]
            break
    return dict(results)
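# A minimal usage sketch for ``cv`` (illustrative only; the data below is hypothetical):
#
#     import numpy as np
#     import lightgbm as lgb
#     X, y = np.random.rand(500, 10), np.random.randint(2, size=500)
#     eval_hist = lgb.cv({'objective': 'binary', 'metric': 'auc'},
#                        lgb.Dataset(X, label=y),
#                        num_boost_round=50,
#                        nfold=5,
#                        early_stopping_rounds=5,
#                        seed=42)
#     # eval_hist is a dict like {'auc-mean': [...], 'auc-stdv': [...]}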