# coding: utf-8
# pylint: disable = invalid-name, W0105
"""Training Library containing training routines of LightGBM."""
from __future__ import absolute_import

import collections
import warnings
from operator import attrgetter

import numpy as np

from . import callback
from .basic import Booster, Dataset, LightGBMError, _InnerPredictor
from .compat import (SKLEARN_INSTALLED, _LGBMGroupKFold, _LGBMStratifiedKFold,
                     integer_types, range_, zip_, string_type)


def train(params, train_set, num_boost_round=100,
          valid_sets=None, valid_names=None,
          fobj=None, feval=None, init_model=None,
          feature_name='auto', categorical_feature='auto',
          early_stopping_rounds=None, evals_result=None,
          verbose_eval=True, learning_rates=None,
          keep_training_booster=False, callbacks=None):
    """Perform the training with given parameters.

    Parameters
    ----------
    params : dict
        Parameters for training.
    train_set : Dataset
        Data to be trained on.
    num_boost_round : int, optional (default=100)
        Number of boosting iterations.
    valid_sets : list of Datasets or None, optional (default=None)
        List of data to be evaluated during training.
    valid_names : list of strings or None, optional (default=None)
        Names of ``valid_sets``.
    fobj : callable or None, optional (default=None)
        Customized objective function.
    feval : callable or None, optional (default=None)
        Customized evaluation function.
        Should accept two parameters: preds, train_data.
        For multi-class task, preds are grouped by class_id first, then by row_id.
        If you want the i-th row prediction for the j-th class, access it as preds[j * num_data + i].
        Note: should return (eval_name, eval_result, is_higher_better) or list of such tuples.
        To ignore the default metric corresponding to the used objective,
        set the ``metric`` parameter to the string ``"None"`` in ``params``.
    init_model : string, Booster or None, optional (default=None)
        Filename of LightGBM model or Booster instance used to continue training.
    feature_name : list of strings or 'auto', optional (default="auto")
        Feature names.
        If 'auto' and data is pandas DataFrame, data columns names are used.
    categorical_feature : list of strings or int, or 'auto', optional (default="auto")
        Categorical features.
        If list of int, interpreted as indices.
        If list of strings, interpreted as feature names (need to specify ``feature_name`` as well).
        If 'auto' and data is pandas DataFrame, pandas categorical columns are used.
        All values in categorical features should be less than int32 max value (2147483647).
        Large values can be memory consuming. Consider using consecutive integers starting from zero.
        All negative values in categorical features will be treated as missing values.
    early_stopping_rounds : int or None, optional (default=None)
        Activates early stopping. The model will train until the validation score stops improving.
        Validation score needs to improve at least every ``early_stopping_rounds`` round(s)
        to continue training.
        Requires at least one validation data and one metric.
        If there's more than one, all of them will be checked, but the training data is ignored anyway.
        If early stopping occurs, the model will add a ``best_iteration`` field.
    evals_result : dict or None, optional (default=None)
        Dictionary used to store all evaluation results of all the items in ``valid_sets``.

        Example
        -------
        With a ``valid_sets`` = [valid_set, train_set],
        ``valid_names`` = ['eval', 'train']
        and a ``params`` = {'metric': 'logloss'}
        returns: {'train': {'logloss': ['0.48253', '0.35953', ...]},
        'eval': {'logloss': ['0.480385', '0.357756', ...]}}.
    verbose_eval : bool or int, optional (default=True)
        Requires at least one validation data.
        If True, the eval metric on the valid set is printed at each boosting stage.
        If int, the eval metric on the valid set is printed at every ``verbose_eval`` boosting stage.
        The last boosting stage or the boosting stage found by using ``early_stopping_rounds`` is also printed.

        Example
        -------
        With ``verbose_eval`` = 4 and at least one item in evals,
        an evaluation metric is printed every 4 (instead of 1) boosting stages.
    learning_rates : list, callable or None, optional (default=None)
        List of learning rates for each boosting round
        or a customized function that calculates ``learning_rate``
        in terms of the current number of rounds (e.g. produces learning rate decay).
    keep_training_booster : bool, optional (default=False)
        Whether the returned Booster will be used to keep training.
        If False, the returned value will be converted into _InnerPredictor before returning.
        You can still use _InnerPredictor as ``init_model`` for future continued training.
    callbacks : list of callables or None, optional (default=None)
        List of callback functions that are applied at each iteration.
        See Callbacks in Python API for more information.

    Returns
    -------
    booster : Booster
        The trained Booster model.
    """
    # create predictor first
    for alias in ["num_iterations", "num_iteration", "n_iter", "num_tree", "num_trees",
                  "num_round", "num_rounds", "num_boost_round", "n_estimators"]:
        if alias in params:
            num_boost_round = int(params.pop(alias))
            warnings.warn("Found `{}` in params. Will use it instead of argument".format(alias))
            break
    for alias in ["early_stopping_round", "early_stopping_rounds", "early_stopping"]:
        if alias in params and params[alias] is not None:
            early_stopping_rounds = int(params.pop(alias))
            warnings.warn("Found `{}` in params. Will use it instead of argument".format(alias))
            break

    if num_boost_round <= 0:
        raise ValueError("num_boost_round should be greater than zero.")
    if isinstance(init_model, string_type):
        predictor = _InnerPredictor(model_file=init_model, pred_parameter=params)
    elif isinstance(init_model, Booster):
        predictor = init_model._to_predictor(dict(init_model.params, **params))
    else:
        predictor = None
    init_iteration = predictor.num_total_iteration if predictor is not None else 0
    # check dataset
    if not isinstance(train_set, Dataset):
        raise TypeError("Training only accepts Dataset object")

    train_set._update_params(params) \
             ._set_predictor(predictor) \
             .set_feature_name(feature_name) \
             .set_categorical_feature(categorical_feature)

    is_valid_contain_train = False
    train_data_name = "training"
    reduced_valid_sets = []
    name_valid_sets = []
    if valid_sets is not None:
        if isinstance(valid_sets, Dataset):
            valid_sets = [valid_sets]
        if isinstance(valid_names, string_type):
            valid_names = [valid_names]
        for i, valid_data in enumerate(valid_sets):
            # reduce cost for prediction on the training data
            if valid_data is train_set:
                is_valid_contain_train = True
                if valid_names is not None:
                    train_data_name = valid_names[i]
                continue
            if not isinstance(valid_data, Dataset):
                raise TypeError("Training only accepts Dataset object")
            reduced_valid_sets.append(valid_data._update_params(params).set_reference(train_set))
            if valid_names is not None and len(valid_names) > i:
                name_valid_sets.append(valid_names[i])
            else:
                name_valid_sets.append('valid_' + str(i))
    # process callbacks
    if callbacks is None:
        callbacks = set()
    else:
        for i, cb in enumerate(callbacks):
            cb.__dict__.setdefault('order', i - len(callbacks))
        callbacks = set(callbacks)

    # Most of the legacy advanced options have become callbacks
    if verbose_eval is True:
        callbacks.add(callback.print_evaluation())
    elif isinstance(verbose_eval, integer_types):
        callbacks.add(callback.print_evaluation(verbose_eval))

    if early_stopping_rounds is not None:
        callbacks.add(callback.early_stopping(early_stopping_rounds, verbose=bool(verbose_eval)))

    if learning_rates is not None:
        callbacks.add(callback.reset_parameter(learning_rate=learning_rates))

    if evals_result is not None:
        callbacks.add(callback.record_evaluation(evals_result))

    callbacks_before_iter = {cb for cb in callbacks if getattr(cb, 'before_iteration', False)}
    callbacks_after_iter = callbacks - callbacks_before_iter
    callbacks_before_iter = sorted(callbacks_before_iter, key=attrgetter('order'))
    callbacks_after_iter = sorted(callbacks_after_iter, key=attrgetter('order'))
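    # callbacks flagged with `before_iteration` run before each boosting round,
    # the rest after it; each group is ordered by its `order` attribute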

    # construct booster
    try:
        booster = Booster(params=params, train_set=train_set)
        if is_valid_contain_train:
            booster.set_train_data_name(train_data_name)
        for valid_set, name_valid_set in zip_(reduced_valid_sets, name_valid_sets):
            booster.add_valid(valid_set, name_valid_set)
    finally:
        train_set._reverse_update_params()
        for valid_set in reduced_valid_sets:
            valid_set._reverse_update_params()
    booster.best_iteration = 0

    # start training
    for i in range_(init_iteration, init_iteration + num_boost_round):
        for cb in callbacks_before_iter:
            cb(callback.CallbackEnv(model=booster,
                                    params=params,
                                    iteration=i,
                                    begin_iteration=init_iteration,
                                    end_iteration=init_iteration + num_boost_round,
                                    evaluation_result_list=None))

        booster.update(fobj=fobj)

        evaluation_result_list = []
        # check evaluation result.
        if valid_sets is not None:
            if is_valid_contain_train:
                evaluation_result_list.extend(booster.eval_train(feval))
            evaluation_result_list.extend(booster.eval_valid(feval))
        try:
            for cb in callbacks_after_iter:
                cb(callback.CallbackEnv(model=booster,
                                        params=params,
                                        iteration=i,
                                        begin_iteration=init_iteration,
                                        end_iteration=init_iteration + num_boost_round,
                                        evaluation_result_list=evaluation_result_list))
        except callback.EarlyStopException as earlyStopException:
            booster.best_iteration = earlyStopException.best_iteration + 1
            evaluation_result_list = earlyStopException.best_score
            break
    booster.best_score = collections.defaultdict(dict)
    for dataset_name, eval_name, score, _ in evaluation_result_list:
        booster.best_score[dataset_name][eval_name] = score
    if not keep_training_booster:
        booster.model_from_string(booster.model_to_string(), False).free_dataset()
    return booster
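
# Usage sketch (illustrative; `X`, `y` and the parameter values below are
# hypothetical, not defined in this module):
#
#     import numpy as np
#     import lightgbm as lgb
#     X, y = np.random.rand(500, 10), np.random.randint(2, size=500)
#     train_data = lgb.Dataset(X, label=y)
#     bst = lgb.train({'objective': 'binary', 'metric': 'binary_logloss'},
#                     train_data, num_boost_round=50)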


class CVBooster(object):
    """Auxiliary data structure to hold all boosters of CV."""
    def __init__(self):
        self.boosters = []
        self.best_iteration = -1

    def append(self, booster):
        """Add a booster to CVBooster."""
        self.boosters.append(booster)

    def __getattr__(self, name):
        """Redirect method calls to each booster."""
        def handlerFunction(*args, **kwargs):
            """Call the method on each booster and collect the results in a list."""
            ret = []
            for booster in self.boosters:
                ret.append(getattr(booster, name)(*args, **kwargs))
            return ret
        return handlerFunction
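
    # For example (hypothetical call): if this CVBooster holds 5 boosters,
    # `cvbooster.current_iteration()` invokes `current_iteration()` on each
    # booster and returns a list of 5 values, one per fold.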


def _make_n_folds(full_data, folds, nfold, params, seed, fpreproc=None, stratified=True, shuffle=True):
    """Make an n-fold list of Booster from random indices."""
    full_data = full_data.construct()
    num_data = full_data.num_data()
    if folds is not None:
        if not hasattr(folds, '__iter__') and not hasattr(folds, 'split'):
            raise AttributeError("folds should be a generator or iterator of (train_idx, test_idx) tuples "
                                 "or scikit-learn splitter object with split method")
        if hasattr(folds, 'split'):
            group_info = full_data.get_group()
            if group_info is not None:
                group_info = group_info.astype(int)
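                # e.g. group_info = [2, 3] (hypothetical) gives
                # flatted_group = [0, 0, 1, 1, 1], keeping each query's rows together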
                flatted_group = np.repeat(range_(len(group_info)), repeats=group_info)
            else:
                flatted_group = np.zeros(num_data, dtype=int)
            folds = folds.split(X=np.zeros(num_data), y=full_data.get_label(), groups=flatted_group)
    else:
        if 'objective' in params and params['objective'] == 'lambdarank':
            if not SKLEARN_INSTALLED:
                raise LightGBMError('Scikit-learn is required for lambdarank cv.')
            # lambdarank task, split according to groups
            group_info = full_data.get_group().astype(int)
            flatted_group = np.repeat(range_(len(group_info)), repeats=group_info)
            group_kfold = _LGBMGroupKFold(n_splits=nfold)
            folds = group_kfold.split(X=np.zeros(num_data), groups=flatted_group)
        elif stratified:
            if not SKLEARN_INSTALLED:
                raise LightGBMError('Scikit-learn is required for stratified cv.')
            skf = _LGBMStratifiedKFold(n_splits=nfold, shuffle=shuffle, random_state=seed)
            folds = skf.split(X=np.zeros(num_data), y=full_data.get_label())
        else:
            if shuffle:
                randidx = np.random.RandomState(seed).permutation(num_data)
            else:
                randidx = np.arange(num_data)
            kstep = int(num_data / nfold)
            test_id = [randidx[i: i + kstep] for i in range_(0, num_data, kstep)]
            train_id = [np.concatenate([test_id[i] for i in range_(nfold) if k != i]) for k in range_(nfold)]
            folds = zip_(train_id, test_id)
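            # e.g. num_data=6, nfold=3 (hypothetical): test ids are [0, 1],
            # [2, 3], [4, 5], and each train set concatenates the other folds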

    ret = CVBooster()
    for train_idx, test_idx in folds:
        train_set = full_data.subset(train_idx)
        valid_set = full_data.subset(test_idx)
        # run preprocessing on the data set if needed
        if fpreproc is not None:
            train_set, valid_set, tparam = fpreproc(train_set, valid_set, params.copy())
        else:
            tparam = params
        cvbooster = Booster(tparam, train_set)
        cvbooster.add_valid(valid_set, 'valid')
        ret.append(cvbooster)
    return ret
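
# Note: a scikit-learn splitter object can also be passed to ``cv`` through its
# ``folds`` argument; ``_make_n_folds`` then calls ``folds.split`` itself.
# Illustrative sketch (``params`` and ``train_data`` are hypothetical):
#
#     from sklearn.model_selection import KFold
#     cv_results = cv(params, train_data, folds=KFold(n_splits=3))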


def _agg_cv_result(raw_results):
    """Aggregate cross-validation results."""
    cvmap = collections.defaultdict(list)
    metric_type = {}
    for one_result in raw_results:
        for one_line in one_result:
            metric_type[one_line[1]] = one_line[3]
            cvmap[one_line[1]].append(one_line[2])
    return [('cv_agg', k, np.mean(v), metric_type[k], np.std(v)) for k, v in cvmap.items()]
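
# For instance (hypothetical numbers): two folds reporting
#     [('valid', 'auc', 0.71, True)] and [('valid', 'auc', 0.73, True)]
# aggregate to [('cv_agg', 'auc', 0.72, True, 0.01)]: the mean and the
# population std of the per-fold scores, with the metric name and
# is_higher_better flag preserved.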


def cv(params, train_set, num_boost_round=100,
       folds=None, nfold=5, stratified=True, shuffle=True,
       metrics=None, fobj=None, feval=None, init_model=None,
       feature_name='auto', categorical_feature='auto',
       early_stopping_rounds=None, fpreproc=None,
       verbose_eval=None, show_stdv=True, seed=0,
       callbacks=None):
    """Perform the cross-validation with given parameters.

    Parameters
    ----------
    params : dict
        Parameters for Booster.
    train_set : Dataset
        Data to be trained on.
    num_boost_round : int, optional (default=100)
        Number of boosting iterations.
    folds : generator or iterator of (train_idx, test_idx) tuples, scikit-learn splitter object or None, optional (default=None)
        If generator or iterator, it should yield the train and test indices for each fold.
        If object, it should be one of the scikit-learn splitter classes
        (http://scikit-learn.org/stable/modules/classes.html#splitter-classes)
        and have ``split`` method.
        This argument has the highest priority over other data split arguments.
    nfold : int, optional (default=5)
        Number of folds in CV.
    stratified : bool, optional (default=True)
        Whether to perform stratified sampling.
    shuffle : bool, optional (default=True)
        Whether to shuffle before splitting data.
    metrics : string, list of strings or None, optional (default=None)
        Evaluation metrics to be monitored during CV.
        If not None, the metric in ``params`` will be overridden.
    fobj : callable or None, optional (default=None)
        Customized objective function.
    feval : callable or None, optional (default=None)
        Customized evaluation function.
        Should accept two parameters: preds, train_data.
        For multi-class task, preds are grouped by class_id first, then by row_id.
        If you want the i-th row prediction for the j-th class, access it as preds[j * num_data + i].
        Note: should return (eval_name, eval_result, is_higher_better) or list of such tuples.
        To ignore the default metric corresponding to the used objective,
        set ``metrics`` to the string ``"None"``.
    init_model : string, Booster or None, optional (default=None)
        Filename of LightGBM model or Booster instance used to continue training.
    feature_name : list of strings or 'auto', optional (default="auto")
        Feature names.
        If 'auto' and data is pandas DataFrame, data columns names are used.
    categorical_feature : list of strings or int, or 'auto', optional (default="auto")
        Categorical features.
        If list of int, interpreted as indices.
        If list of strings, interpreted as feature names (need to specify ``feature_name`` as well).
        If 'auto' and data is pandas DataFrame, pandas categorical columns are used.
        All values in categorical features should be less than int32 max value (2147483647).
        Large values can be memory consuming. Consider using consecutive integers starting from zero.
        All negative values in categorical features will be treated as missing values.
    early_stopping_rounds : int or None, optional (default=None)
        Activates early stopping.
        CV score needs to improve at least every ``early_stopping_rounds`` round(s)
        to continue.
        Requires at least one metric. If there's more than one, will check all of them.
        Last entry in evaluation history is the one from best iteration.
    fpreproc : callable or None, optional (default=None)
        Preprocessing function that takes (dtrain, dtest, params)
        and returns transformed versions of those.
    verbose_eval : bool, int, or None, optional (default=None)
        Whether to display the progress.
        If None, progress will be displayed when np.ndarray is returned.
        If True, progress will be displayed at every boosting stage.
        If int, progress will be displayed at every given ``verbose_eval`` boosting stage.
    show_stdv : bool, optional (default=True)
        Whether to display the standard deviation in progress.
        Results are not affected by this parameter, and always contain std.
    seed : int, optional (default=0)
        Seed used to generate the folds (passed to numpy.random.seed).
    callbacks : list of callables or None, optional (default=None)
        List of callback functions that are applied at each iteration.
        See Callbacks in Python API for more information.

    Returns
    -------
    eval_hist : dict
        Evaluation history.
        The dictionary has the following format:
        {'metric1-mean': [values], 'metric1-stdv': [values],
        'metric2-mean': [values], 'metric2-stdv': [values],
        ...}.
    """
    if not isinstance(train_set, Dataset):
        raise TypeError("Training only accepts Dataset object")

    for alias in ["num_iterations", "num_iteration", "n_iter", "num_tree", "num_trees",
                  "num_round", "num_rounds", "num_boost_round", "n_estimators"]:
        if alias in params:
            warnings.warn("Found `{}` in params. Will use it instead of argument".format(alias))
            num_boost_round = int(params.pop(alias))
            break
    for alias in ["early_stopping_round", "early_stopping_rounds", "early_stopping"]:
        if alias in params and params[alias] is not None:
            warnings.warn("Found `{}` in params. Will use it instead of argument".format(alias))
            early_stopping_rounds = int(params.pop(alias))
            break

    if num_boost_round <= 0:
        raise ValueError("num_boost_round should be greater than zero.")
    if isinstance(init_model, string_type):
        predictor = _InnerPredictor(model_file=init_model, pred_parameter=params)
    elif isinstance(init_model, Booster):
        predictor = init_model._to_predictor(dict(init_model.params, **params))
    else:
        predictor = None
    train_set._update_params(params) \
             ._set_predictor(predictor) \
             .set_feature_name(feature_name) \
             .set_categorical_feature(categorical_feature)

    if metrics is not None:
        params['metric'] = metrics

    results = collections.defaultdict(list)
    cvfolds = _make_n_folds(train_set, folds=folds, nfold=nfold,
                            params=params, seed=seed, fpreproc=fpreproc,
                            stratified=stratified, shuffle=shuffle)

    # setup callbacks
    if callbacks is None:
        callbacks = set()
    else:
        for i, cb in enumerate(callbacks):
            cb.__dict__.setdefault('order', i - len(callbacks))
        callbacks = set(callbacks)
    if early_stopping_rounds is not None:
        callbacks.add(callback.early_stopping(early_stopping_rounds, verbose=False))
    if verbose_eval is True:
        callbacks.add(callback.print_evaluation(show_stdv=show_stdv))
    elif isinstance(verbose_eval, integer_types):
        callbacks.add(callback.print_evaluation(verbose_eval, show_stdv=show_stdv))

    callbacks_before_iter = {cb for cb in callbacks if getattr(cb, 'before_iteration', False)}
    callbacks_after_iter = callbacks - callbacks_before_iter
    callbacks_before_iter = sorted(callbacks_before_iter, key=attrgetter('order'))
    callbacks_after_iter = sorted(callbacks_after_iter, key=attrgetter('order'))

    for i in range_(num_boost_round):
        for cb in callbacks_before_iter:
            cb(callback.CallbackEnv(model=cvfolds,
                                    params=params,
                                    iteration=i,
                                    begin_iteration=0,
                                    end_iteration=num_boost_round,
                                    evaluation_result_list=None))
        cvfolds.update(fobj=fobj)
        res = _agg_cv_result(cvfolds.eval_valid(feval))
        for _, key, mean, _, std in res:
            results[key + '-mean'].append(mean)
            results[key + '-stdv'].append(std)
        try:
            for cb in callbacks_after_iter:
                cb(callback.CallbackEnv(model=cvfolds,
                                        params=params,
                                        iteration=i,
                                        begin_iteration=0,
                                        end_iteration=num_boost_round,
                                        evaluation_result_list=res))
        except callback.EarlyStopException as earlyStopException:
            cvfolds.best_iteration = earlyStopException.best_iteration + 1
            for k in results:
                results[k] = results[k][:cvfolds.best_iteration]
            break
    return dict(results)
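
# Usage sketch (illustrative; ``train_data`` is the hypothetical Dataset from
# the ``train`` example above):
#
#     cv_results = cv({'objective': 'binary', 'metric': 'binary_logloss'},
#                     train_data, num_boost_round=50, nfold=5)
#     print(cv_results['binary_logloss-mean'][-1])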