# coding: utf-8
# pylint: disable = invalid-name, W0105
"""Library with training routines of LightGBM."""
from __future__ import absolute_import

import collections
import copy
import warnings
from operator import attrgetter

import numpy as np

from . import callback
from .basic import Booster, Dataset, LightGBMError, _InnerPredictor
from .compat import (SKLEARN_INSTALLED, _LGBMGroupKFold, _LGBMStratifiedKFold,
                     string_type, integer_types, range_, zip_)


def train(params, train_set, num_boost_round=100,
          valid_sets=None, valid_names=None,
          fobj=None, feval=None, init_model=None,
          feature_name='auto', categorical_feature='auto',
          early_stopping_rounds=None, evals_result=None,
          verbose_eval=True, learning_rates=None,
          keep_training_booster=False, callbacks=None):
    """Perform the training with given parameters.

    Parameters
    ----------
    params : dict
        Parameters for training.
    train_set : Dataset
        Data to be trained on.
    num_boost_round : int, optional (default=100)
        Number of boosting iterations.
    valid_sets : list of Datasets or None, optional (default=None)
        List of data to be evaluated on during training.
    valid_names : list of strings or None, optional (default=None)
        Names of ``valid_sets``.
    fobj : callable or None, optional (default=None)
        Customized objective function.
    feval : callable or None, optional (default=None)
        Customized evaluation function.
        Should accept two parameters: preds, train_data,
        and return (eval_name, eval_result, is_higher_better) or list of such tuples.
        For multi-class task, preds are grouped by class_id first, then by row_id.
        To get the prediction on the i-th row for the j-th class, access preds[j * num_data + i].
        To ignore the default metric corresponding to the used objective,
        set the ``metric`` parameter to the string ``"None"`` in ``params``.
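
        Example
        -------
        A minimal sketch of a custom binary error metric;
        the function name ``binary_error`` is illustrative:

        def binary_error(preds, train_data):
            labels = train_data.get_label()
            return 'error', np.mean(labels != (preds > 0.5)), False
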
    init_model : string, Booster or None, optional (default=None)
        Filename of LightGBM model or Booster instance used to continue training.
    feature_name : list of strings or 'auto', optional (default="auto")
        Feature names.
        If 'auto' and data is pandas DataFrame, data column names are used.
    categorical_feature : list of strings or int, or 'auto', optional (default="auto")
        Categorical features.
        If list of int, interpreted as indices.
        If list of strings, interpreted as feature names (need to specify ``feature_name`` as well).
        If 'auto' and data is pandas DataFrame, pandas categorical columns are used.
        All values in categorical features should be less than int32 max value (2147483647).
        Large values could be memory consuming. Consider using consecutive integers starting from zero.
        All negative values in categorical features will be treated as missing values.
    early_stopping_rounds : int or None, optional (default=None)
        Activates early stopping. The model will train until the validation score stops improving.
        Validation score needs to improve at least every ``early_stopping_rounds`` round(s)
        to continue training.
        Requires at least one validation dataset and one metric.
        If there's more than one, will check all of them, but the training data is ignored anyway.
        The index of iteration that has the best performance will be saved in the ``best_iteration`` field
        if early stopping logic is enabled by setting ``early_stopping_rounds``.
    evals_result : dict or None, optional (default=None)
        Dictionary used to store all evaluation results of all the items in ``valid_sets``.

        Example
        -------
        With ``valid_sets`` = [valid_set, train_set],
        ``valid_names`` = ['eval', 'train']
        and ``params`` = {'metric': 'logloss'},
        this stores {'train': {'logloss': ['0.48253', '0.35953', ...]},
        'eval': {'logloss': ['0.480385', '0.357756', ...]}}.

    verbose_eval : bool or int, optional (default=True)
        Requires at least one validation dataset.
        If True, the eval metric on the valid set is printed at each boosting stage.
        If int, the eval metric on the valid set is printed at every ``verbose_eval`` boosting stage.
        The last boosting stage or the boosting stage found by using ``early_stopping_rounds`` is also printed.

        Example
        -------
        With ``verbose_eval`` = 4 and at least one item in ``valid_sets``,
        an evaluation metric is printed every 4 (instead of 1) boosting stages.

    learning_rates : list, callable or None, optional (default=None)
        List of learning rates for each boosting round
        or a customized function that calculates ``learning_rate``
        in terms of the current number of rounds (e.g. to yield learning-rate decay).
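
        Example
        -------
        A sketch of exponential decay; the constants are illustrative:

        booster = train(params, train_set,
                        learning_rates=lambda iter: 0.1 * (0.99 ** iter))
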
    keep_training_booster : bool, optional (default=False)
        Whether the returned Booster will be used to keep training.
        If False, the returned value will be converted into _InnerPredictor before returning.
        You can still use _InnerPredictor as ``init_model`` to continue training later.
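
        Example
        -------
        A sketch of continued training; the second call resumes from the
        booster returned by the first:

        booster = train(params, train_set, keep_training_booster=True)
        booster = train(params, train_set, init_model=booster)
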
    callbacks : list of callables or None, optional (default=None)
        List of callback functions that are applied at each iteration.
        See Callbacks in Python API for more information.

    Returns
    -------
    booster : Booster
        The trained Booster model.
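
    Example
    -------
    A minimal usage sketch; the parameter values and the ``valid_set``
    Dataset are illustrative:

    params = {'objective': 'binary', 'metric': 'auc'}
    booster = train(params, train_set, num_boost_round=50,
                    valid_sets=[valid_set], early_stopping_rounds=5)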
    """
    # create predictor first
    params = copy.deepcopy(params)
    if fobj is not None:
        params['objective'] = 'none'
    for alias in ["num_iterations", "num_iteration", "n_iter", "num_tree", "num_trees",
                  "num_round", "num_rounds", "num_boost_round", "n_estimators"]:
        if alias in params:
            num_boost_round = int(params.pop(alias))
            warnings.warn("Found `{}` in params. Will use it instead of argument".format(alias))
            break
    for alias in ["early_stopping_round", "early_stopping_rounds", "early_stopping"]:
        if alias in params and params[alias] is not None:
            early_stopping_rounds = int(params.pop(alias))
            warnings.warn("Found `{}` in params. Will use it instead of argument".format(alias))
            break

    if num_boost_round <= 0:
        raise ValueError("num_boost_round should be greater than zero.")
    if isinstance(init_model, string_type):
        predictor = _InnerPredictor(model_file=init_model, pred_parameter=params)
    elif isinstance(init_model, Booster):
        predictor = init_model._to_predictor(dict(init_model.params, **params))
    else:
        predictor = None
    init_iteration = predictor.num_total_iteration if predictor is not None else 0
    # check dataset
    if not isinstance(train_set, Dataset):
        raise TypeError("Training only accepts Dataset object")

    train_set._update_params(params) \
             ._set_predictor(predictor) \
             .set_feature_name(feature_name) \
             .set_categorical_feature(categorical_feature)

    is_valid_contain_train = False
    train_data_name = "training"
    reduced_valid_sets = []
    name_valid_sets = []
    if valid_sets is not None:
        if isinstance(valid_sets, Dataset):
            valid_sets = [valid_sets]
        if isinstance(valid_names, string_type):
            valid_names = [valid_names]
        for i, valid_data in enumerate(valid_sets):
            # reduce cost: evaluate the training data via eval_train instead of adding it as a valid set
            if valid_data is train_set:
                is_valid_contain_train = True
                if valid_names is not None:
                    train_data_name = valid_names[i]
                continue
            if not isinstance(valid_data, Dataset):
                raise TypeError("Traninig only accepts Dataset object")
            reduced_valid_sets.append(valid_data._update_params(params).set_reference(train_set))
            if valid_names is not None and len(valid_names) > i:
                name_valid_sets.append(valid_names[i])
            else:
                name_valid_sets.append('valid_' + str(i))
    # process callbacks
    if callbacks is None:
        callbacks = set()
    else:
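        # user callbacks get a negative 'order' so they sort ahead of the built-in callbacks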
        for i, cb in enumerate(callbacks):
            cb.__dict__.setdefault('order', i - len(callbacks))
        callbacks = set(callbacks)

    # Most legacy advanced options become callbacks
    if verbose_eval is True:
        callbacks.add(callback.print_evaluation())
    elif isinstance(verbose_eval, integer_types):
        callbacks.add(callback.print_evaluation(verbose_eval))

    if early_stopping_rounds is not None:
        callbacks.add(callback.early_stopping(early_stopping_rounds, verbose=bool(verbose_eval)))

    if learning_rates is not None:
        callbacks.add(callback.reset_parameter(learning_rate=learning_rates))

    if evals_result is not None:
        callbacks.add(callback.record_evaluation(evals_result))

    callbacks_before_iter = {cb for cb in callbacks if getattr(cb, 'before_iteration', False)}
    callbacks_after_iter = callbacks - callbacks_before_iter
    callbacks_before_iter = sorted(callbacks_before_iter, key=attrgetter('order'))
    callbacks_after_iter = sorted(callbacks_after_iter, key=attrgetter('order'))

    # construct booster
    try:
        booster = Booster(params=params, train_set=train_set)
        if is_valid_contain_train:
            booster.set_train_data_name(train_data_name)
        for valid_set, name_valid_set in zip_(reduced_valid_sets, name_valid_sets):
            booster.add_valid(valid_set, name_valid_set)
    finally:
        train_set._reverse_update_params()
        for valid_set in reduced_valid_sets:
            valid_set._reverse_update_params()
    booster.best_iteration = 0

    # start training
    for i in range_(init_iteration, init_iteration + num_boost_round):
        for cb in callbacks_before_iter:
            cb(callback.CallbackEnv(model=booster,
                                    params=params,
                                    iteration=i,
                                    begin_iteration=init_iteration,
                                    end_iteration=init_iteration + num_boost_round,
                                    evaluation_result_list=None))

        booster.update(fobj=fobj)

        evaluation_result_list = []
        # check evaluation result.
        if valid_sets is not None:
            if is_valid_contain_train:
                evaluation_result_list.extend(booster.eval_train(feval))
            evaluation_result_list.extend(booster.eval_valid(feval))
        try:
            for cb in callbacks_after_iter:
                cb(callback.CallbackEnv(model=booster,
                                        params=params,
                                        iteration=i,
                                        begin_iteration=init_iteration,
                                        end_iteration=init_iteration + num_boost_round,
                                        evaluation_result_list=evaluation_result_list))
        except callback.EarlyStopException as earlyStopException:
            booster.best_iteration = earlyStopException.best_iteration + 1
            evaluation_result_list = earlyStopException.best_score
            break
    booster.best_score = collections.defaultdict(dict)
    for dataset_name, eval_name, score, _ in evaluation_result_list:
        booster.best_score[dataset_name][eval_name] = score
    if not keep_training_booster:
        booster.model_from_string(booster.model_to_string(), False).free_dataset()
    return booster


class _CVBooster(object):
    """Auxiliary data struct to hold all boosters of CV."""

    def __init__(self):
        self.boosters = []
        self.best_iteration = -1

    def append(self, booster):
        """Add a booster to _CVBooster."""
        self.boosters.append(booster)

    def __getattr__(self, name):
        """Redirect methods call of _CVBooster."""
        def handler_function(*args, **kwargs):
            """Call methods with each booster, and concatenate their results."""
            ret = []
            for booster in self.boosters:
                ret.append(getattr(booster, name)(*args, **kwargs))
            return ret
        return handler_function


def _make_n_folds(full_data, folds, nfold, params, seed, fpreproc=None, stratified=True, shuffle=True):
    """Make a n-fold list of Booster from random indices."""
    full_data = full_data.construct()
    num_data = full_data.num_data()
    if folds is not None:
        if not hasattr(folds, '__iter__') and not hasattr(folds, 'split'):
            raise AttributeError("folds should be a generator or iterator of (train_idx, test_idx) tuples "
                                 "or scikit-learn splitter object with split method")
        if hasattr(folds, 'split'):
            group_info = full_data.get_group()
            if group_info is not None:
                group_info = group_info.astype(int)
                flatted_group = np.repeat(range_(len(group_info)), repeats=group_info)
            else:
                flatted_group = np.zeros(num_data, dtype=int)
            folds = folds.split(X=np.zeros(num_data), y=full_data.get_label(), groups=flatted_group)
    else:
        if 'objective' in params and params['objective'] == 'lambdarank':
            if not SKLEARN_INSTALLED:
                raise LightGBMError('Scikit-learn is required for lambdarank cv.')
            # lambdarank task, split according to groups
            group_info = full_data.get_group().astype(int)
            flatted_group = np.repeat(range_(len(group_info)), repeats=group_info)
            group_kfold = _LGBMGroupKFold(n_splits=nfold)
            folds = group_kfold.split(X=np.zeros(num_data), groups=flatted_group)
        elif stratified:
            if not SKLEARN_INSTALLED:
                raise LightGBMError('Scikit-learn is required for stratified cv.')
            skf = _LGBMStratifiedKFold(n_splits=nfold, shuffle=shuffle, random_state=seed)
            folds = skf.split(X=np.zeros(num_data), y=full_data.get_label())
        else:
            if shuffle:
                randidx = np.random.RandomState(seed).permutation(num_data)
            else:
                randidx = np.arange(num_data)
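            # cut indices into nfold contiguous chunks; fold k trains on
            # every chunk except the k-th and validates on the k-th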
            kstep = int(num_data / nfold)
            test_id = [randidx[i: i + kstep] for i in range_(0, num_data, kstep)]
            train_id = [np.concatenate([test_id[i] for i in range_(nfold) if k != i]) for k in range_(nfold)]
            folds = zip_(train_id, test_id)

    ret = _CVBooster()
    for train_idx, test_idx in folds:
        train_set = full_data.subset(train_idx)
        valid_set = full_data.subset(test_idx)
        # run preprocessing on the data set if needed
        if fpreproc is not None:
            train_set, valid_set, tparam = fpreproc(train_set, valid_set, params.copy())
        else:
            tparam = params
        cvbooster = Booster(tparam, train_set)
        cvbooster.add_valid(valid_set, 'valid')
        ret.append(cvbooster)
    return ret


def _agg_cv_result(raw_results):
    """Aggregate cross-validation results."""
    cvmap = collections.defaultdict(list)
    metric_type = {}
    for one_result in raw_results:
        for one_line in one_result:
            metric_type[one_line[1]] = one_line[3]
            cvmap[one_line[1]].append(one_line[2])
    return [('cv_agg', k, np.mean(v), metric_type[k], np.std(v)) for k, v in cvmap.items()]


def cv(params, train_set, num_boost_round=100,
       folds=None, nfold=5, stratified=True, shuffle=True,
       metrics=None, fobj=None, feval=None, init_model=None,
       feature_name='auto', categorical_feature='auto',
       early_stopping_rounds=None, fpreproc=None,
       verbose_eval=None, show_stdv=True, seed=0,
       callbacks=None):
    """Perform the cross-validation with given paramaters.

    Parameters
    ----------
    params : dict
        Parameters for Booster.
    train_set : Dataset
        Data to be trained on.
    num_boost_round : int, optional (default=100)
        Number of boosting iterations.
    folds : generator or iterator of (train_idx, test_idx) tuples, scikit-learn splitter object or None, optional (default=None)
        If generator or iterator, it should yield the train and test indices for each fold.
        If object, it should be one of the scikit-learn splitter classes
        (http://scikit-learn.org/stable/modules/classes.html#splitter-classes)
        and have ``split`` method.
        This argument has the highest priority over other data split arguments.
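
        Example
        -------
        A sketch passing a scikit-learn splitter; assumes scikit-learn is installed:

        from sklearn.model_selection import KFold
        eval_hist = cv(params, train_set, folds=KFold(n_splits=3))
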
    nfold : int, optional (default=5)
        Number of folds in CV.
    stratified : bool, optional (default=True)
        Whether to perform stratified sampling.
    shuffle : bool, optional (default=True)
        Whether to shuffle before splitting data.
    metrics : string, list of strings or None, optional (default=None)
        Evaluation metrics to be monitored during CV.
        If not None, the metric in ``params`` will be overridden.
    fobj : callable or None, optional (default=None)
        Custom objective function.
    feval : callable or None, optional (default=None)
        Customized evaluation function.
        Should accept two parameters: preds, train_data,
        and return (eval_name, eval_result, is_higher_better) or list of such tuples.
        For multi-class task, preds are grouped by class_id first, then by row_id.
        To get the prediction on the i-th row for the j-th class, access preds[j * num_data + i].
        To ignore the default metric corresponding to the used objective,
        set ``metrics`` to the string ``"None"``.
    init_model : string, Booster or None, optional (default=None)
        Filename of LightGBM model or Booster instance used to continue training.
    feature_name : list of strings or 'auto', optional (default="auto")
        Feature names.
        If 'auto' and data is pandas DataFrame, data column names are used.
    categorical_feature : list of strings or int, or 'auto', optional (default="auto")
        Categorical features.
        If list of int, interpreted as indices.
        If list of strings, interpreted as feature names (need to specify ``feature_name`` as well).
        If 'auto' and data is pandas DataFrame, pandas categorical columns are used.
        All values in categorical features should be less than int32 max value (2147483647).
        Large values could be memory consuming. Consider using consecutive integers starting from zero.
        All negative values in categorical features will be treated as missing values.
    early_stopping_rounds : int or None, optional (default=None)
        Activates early stopping.
        CV score needs to improve at least every ``early_stopping_rounds`` round(s)
        to continue.
        Requires at least one metric. If there's more than one, will check all of them.
        Last entry in evaluation history is the one from the best iteration.
    fpreproc : callable or None, optional (default=None)
        Preprocessing function that takes (dtrain, dtest, params)
        and returns transformed versions of those.
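
        Example
        -------
        A sketch of a pass-through preprocessor; the adjustment named in
        the comment is only a suggestion:

        def fpreproc(dtrain, dtest, params):
            # e.g. recompute label-dependent parameters per fold
            return dtrain, dtest, params
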
    verbose_eval : bool, int, or None, optional (default=None)
        Whether to display the progress.
        If None, progress will be displayed when np.ndarray is returned.
        If True, progress will be displayed at every boosting stage.
        If int, progress will be displayed at every given ``verbose_eval`` boosting stage.
    show_stdv : bool, optional (default=True)
        Whether to display the standard deviation in progress.
        Results are not affected by this parameter, and always contain std.
    seed : int, optional (default=0)
        Seed used to generate the folds (passed to numpy.random.RandomState).
    callbacks : list of callables or None, optional (default=None)
        List of callback functions that are applied at each iteration.
        See Callbacks in Python API for more information.

    Returns
    -------
    eval_hist : dict
        Evaluation history.
        The dictionary has the following format:
        {'metric1-mean': [values], 'metric1-stdv': [values],
        'metric2-mean': [values], 'metric2-stdv': [values],
        ...}.
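
    Example
    -------
    A sketch of reading the history; the metric name ``auc`` is illustrative:

    eval_hist = cv(params, train_set, metrics='auc', early_stopping_rounds=5)
    n_best_rounds = len(eval_hist['auc-mean'])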
    """
    if not isinstance(train_set, Dataset):
        raise TypeError("Traninig only accepts Dataset object")

    params = copy.deepcopy(params)
    if fobj is not None:
        params['objective'] = 'none'
    for alias in ["num_iterations", "num_iteration", "n_iter", "num_tree", "num_trees",
                  "num_round", "num_rounds", "num_boost_round", "n_estimators"]:
        if alias in params:
            warnings.warn("Found `{}` in params. Will use it instead of argument".format(alias))
            num_boost_round = params.pop(alias)
            break
    for alias in ["early_stopping_round", "early_stopping_rounds", "early_stopping"]:
        if alias in params:
            warnings.warn("Found `{}` in params. Will use it instead of argument".format(alias))
            early_stopping_rounds = params.pop(alias)
            break

    if num_boost_round <= 0:
        raise ValueError("num_boost_round should be greater than zero.")
    if isinstance(init_model, string_type):
        predictor = _InnerPredictor(model_file=init_model, pred_parameter=params)
    elif isinstance(init_model, Booster):
        predictor = init_model._to_predictor(dict(init_model.params, **params))
    else:
        predictor = None
    train_set._update_params(params) \
             ._set_predictor(predictor) \
             .set_feature_name(feature_name) \
             .set_categorical_feature(categorical_feature)

    if metrics is not None:
        params['metric'] = metrics

    results = collections.defaultdict(list)
    cvfolds = _make_n_folds(train_set, folds=folds, nfold=nfold,
                            params=params, seed=seed, fpreproc=fpreproc,
                            stratified=stratified, shuffle=shuffle)

    # setup callbacks
    if callbacks is None:
        callbacks = set()
    else:
        for i, cb in enumerate(callbacks):
            cb.__dict__.setdefault('order', i - len(callbacks))
        callbacks = set(callbacks)
    if early_stopping_rounds is not None:
        callbacks.add(callback.early_stopping(early_stopping_rounds, verbose=False))
    if verbose_eval is True:
        callbacks.add(callback.print_evaluation(show_stdv=show_stdv))
    elif isinstance(verbose_eval, integer_types):
        callbacks.add(callback.print_evaluation(verbose_eval, show_stdv=show_stdv))

    callbacks_before_iter = {cb for cb in callbacks if getattr(cb, 'before_iteration', False)}
    callbacks_after_iter = callbacks - callbacks_before_iter
    callbacks_before_iter = sorted(callbacks_before_iter, key=attrgetter('order'))
    callbacks_after_iter = sorted(callbacks_after_iter, key=attrgetter('order'))

    for i in range_(num_boost_round):
        for cb in callbacks_before_iter:
            cb(callback.CallbackEnv(model=cvfolds,
                                    params=params,
                                    iteration=i,
                                    begin_iteration=0,
                                    end_iteration=num_boost_round,
                                    evaluation_result_list=None))
        cvfolds.update(fobj=fobj)
        res = _agg_cv_result(cvfolds.eval_valid(feval))
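        # each entry of res is ('cv_agg', eval_name, mean, is_higher_better, stdv)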
        for _, key, mean, _, std in res:
            results[key + '-mean'].append(mean)
            results[key + '-stdv'].append(std)
        try:
            for cb in callbacks_after_iter:
                cb(callback.CallbackEnv(model=cvfolds,
                                        params=params,
                                        iteration=i,
                                        begin_iteration=0,
                                        end_iteration=num_boost_round,
                                        evaluation_result_list=res))
        except callback.EarlyStopException as earlyStopException:
            cvfolds.best_iteration = earlyStopException.best_iteration + 1
            for k in results:
                results[k] = results[k][:cvfolds.best_iteration]
            break
    return dict(results)