# coding: utf-8
# pylint: disable = invalid-name, W0105
"""Library with training routines of LightGBM."""
from __future__ import absolute_import

import collections
import copy
import warnings
from operator import attrgetter

import numpy as np

from . import callback
from .basic import Booster, Dataset, LightGBMError, _InnerPredictor
from .compat import (SKLEARN_INSTALLED, _LGBMGroupKFold, _LGBMStratifiedKFold,
                     string_type, integer_types, range_, zip_)


def train(params, train_set, num_boost_round=100,
          valid_sets=None, valid_names=None,
          fobj=None, feval=None, init_model=None,
          feature_name='auto', categorical_feature='auto',
          early_stopping_rounds=None, evals_result=None,
          verbose_eval=True, learning_rates=None,
          keep_training_booster=False, callbacks=None):
    """Perform the training with given parameters.

    Parameters
    ----------
    params : dict
        Parameters for training.
    train_set : Dataset
        Data to be trained on.
    num_boost_round : int, optional (default=100)
        Number of boosting iterations.
    valid_sets : list of Datasets or None, optional (default=None)
        List of data to be evaluated on during training.
    valid_names : list of strings or None, optional (default=None)
        Names of ``valid_sets``.
    fobj : callable or None, optional (default=None)
        Customized objective function.
    feval : callable or None, optional (default=None)
        Customized evaluation function.
        Should accept two parameters: preds, train_data,
        and return (eval_name, eval_result, is_higher_better) or list of such tuples.
        For multi-class task, preds are grouped by class_id first, then by row_id.
        To get the prediction for the i-th row and j-th class, access preds[j * num_data + i].
        To ignore the default metric corresponding to the used objective,
        set the ``metric`` parameter to the string ``"None"`` in ``params``.
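
        Example
        -------
        A mean absolute error metric for a regression objective could be written
        as follows (an illustrative sketch; the name ``my_mae`` is arbitrary):

        def my_mae(preds, train_data):
            return 'mae', np.mean(np.abs(train_data.get_label() - preds)), False
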
    init_model : string, Booster or None, optional (default=None)
        Filename of LightGBM model or Booster instance used to continue training.
    feature_name : list of strings or 'auto', optional (default="auto")
        Feature names.
        If 'auto' and data is pandas DataFrame, data column names are used.
    categorical_feature : list of strings or int, or 'auto', optional (default="auto")
        Categorical features.
        If list of int, interpreted as indices.
        If list of strings, interpreted as feature names (need to specify ``feature_name`` as well).
        If 'auto' and data is pandas DataFrame, pandas unordered categorical columns are used.
        All values in categorical features should be less than int32 max value (2147483647).
        Large values could be memory consuming. Consider using consecutive integers starting from zero.
        All negative values in categorical features will be treated as missing values.
    early_stopping_rounds : int or None, optional (default=None)
        Activates early stopping. The model will train until the validation score stops improving.
        Validation score needs to improve at least every ``early_stopping_rounds`` round(s)
        to continue training.
        Requires at least one validation data and one metric.
        If there's more than one, will check all of them, but the training data is ignored anyway.
        To check only the first metric you can pass in ``callbacks``
        ``early_stopping`` callback with ``first_metric_only=True``.
        The index of iteration that has the best performance will be saved in the ``best_iteration`` field
        if early stopping logic is enabled by setting ``early_stopping_rounds``.
    evals_result: dict or None, optional (default=None)
        This dictionary is used to store all evaluation results of all the items in ``valid_sets``.

        Example
        -------
        With a ``valid_sets`` = [valid_set, train_set],
        ``valid_names`` = ['eval', 'train']
        and a ``params`` = {'metric': 'logloss'}
        returns {'train': {'logloss': ['0.48253', '0.35953', ...]},
        'eval': {'logloss': ['0.480385', '0.357756', ...]}}.

    verbose_eval : bool or int, optional (default=True)
        Requires at least one validation data.
        If True, the eval metric on the valid set is printed at each boosting stage.
        If int, the eval metric on the valid set is printed at every ``verbose_eval`` boosting stage.
        The last boosting stage or the boosting stage found by using ``early_stopping_rounds`` is also printed.

        Example
        -------
        With ``verbose_eval`` = 4 and at least one item in ``valid_sets``,
        an evaluation metric is printed every 4 (instead of 1) boosting stages.

    learning_rates : list, callable or None, optional (default=None)
        List of learning rates for each boosting round
        or a customized function that calculates ``learning_rate``
        in terms of the current round number (e.g. to produce learning rate decay).
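
        Example
        -------
        ``learning_rates=lambda current_round: 0.1 * (0.99 ** current_round)``
        yields an exponentially decaying learning rate (illustrative values).
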
    keep_training_booster : bool, optional (default=False)
        Whether the returned Booster will be used to keep training.
        If False, the returned value will be converted into _InnerPredictor before returning.
        You can still use _InnerPredictor as ``init_model`` for future continued training.
    callbacks : list of callables or None, optional (default=None)
        List of callback functions that are applied at each iteration.
        See Callbacks in Python API for more information.

    Returns
    -------
    booster : Booster
        The trained Booster model.
    """
    # create predictor first
    params = copy.deepcopy(params)
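    # a custom objective (fobj) replaces the built-in one, so tell LightGBM to use objective 'none'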
    if fobj is not None:
        params['objective'] = 'none'
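    # parameter aliases found in params take precedence over the corresponding keyword arguments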
    for alias in ["num_iterations", "num_iteration", "n_iter", "num_tree", "num_trees",
                  "num_round", "num_rounds", "num_boost_round", "n_estimators"]:
        if alias in params:
            num_boost_round = int(params.pop(alias))
            warnings.warn("Found `{}` in params. Will use it instead of argument".format(alias))
            break
    for alias in ["early_stopping_round", "early_stopping_rounds", "early_stopping"]:
        if alias in params and params[alias] is not None:
            early_stopping_rounds = int(params.pop(alias))
            warnings.warn("Found `{}` in params. Will use it instead of argument".format(alias))
            break

    if num_boost_round <= 0:
        raise ValueError("num_boost_round should be greater than zero.")
    if isinstance(init_model, string_type):
        predictor = _InnerPredictor(model_file=init_model, pred_parameter=params)
    elif isinstance(init_model, Booster):
        predictor = init_model._to_predictor(dict(init_model.params, **params))
    else:
        predictor = None
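    # when continuing from an existing model, number the new boosting rounds after the already-trained ones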
    init_iteration = predictor.num_total_iteration if predictor is not None else 0
    # check dataset
    if not isinstance(train_set, Dataset):
        raise TypeError("Training only accepts Dataset object")

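    # push the training parameters, the predictor (for continued training) and the feature settings into the Dataset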
    train_set._update_params(params) \
             ._set_predictor(predictor) \
             .set_feature_name(feature_name) \
             .set_categorical_feature(categorical_feature)

    is_valid_contain_train = False
    train_data_name = "training"
    reduced_valid_sets = []
    name_valid_sets = []
    if valid_sets is not None:
        if isinstance(valid_sets, Dataset):
            valid_sets = [valid_sets]
        if isinstance(valid_names, string_type):
            valid_names = [valid_names]
        for i, valid_data in enumerate(valid_sets):
            # reduce cost for prediction training data
            if valid_data is train_set:
                is_valid_contain_train = True
                if valid_names is not None:
                    train_data_name = valid_names[i]
                continue
            if not isinstance(valid_data, Dataset):
                raise TypeError("Training only accepts Dataset object")
            reduced_valid_sets.append(valid_data._update_params(params).set_reference(train_set))
            if valid_names is not None and len(valid_names) > i:
                name_valid_sets.append(valid_names[i])
            else:
                name_valid_sets.append('valid_' + str(i))
    # process callbacks
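    # user callbacks get a default negative 'order' so they are sorted ahead of the built-in callbacks added below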
    if callbacks is None:
        callbacks = set()
    else:
        for i, cb in enumerate(callbacks):
            cb.__dict__.setdefault('order', i - len(callbacks))
        callbacks = set(callbacks)

    # Most of the legacy advanced options become callbacks
    if verbose_eval is True:
        callbacks.add(callback.print_evaluation())
    elif isinstance(verbose_eval, integer_types):
        callbacks.add(callback.print_evaluation(verbose_eval))

    if early_stopping_rounds is not None:
        callbacks.add(callback.early_stopping(early_stopping_rounds, verbose=bool(verbose_eval)))

    if learning_rates is not None:
        callbacks.add(callback.reset_parameter(learning_rate=learning_rates))

    if evals_result is not None:
        callbacks.add(callback.record_evaluation(evals_result))

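    # split callbacks into those run before and after each boosting iteration, each group sorted by 'order'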
    callbacks_before_iter = {cb for cb in callbacks if getattr(cb, 'before_iteration', False)}
    callbacks_after_iter = callbacks - callbacks_before_iter
    callbacks_before_iter = sorted(callbacks_before_iter, key=attrgetter('order'))
    callbacks_after_iter = sorted(callbacks_after_iter, key=attrgetter('order'))

    # construct booster
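    # parameter updates pushed into the Datasets above are reverted in the finally block so they can be reused later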
    try:
        booster = Booster(params=params, train_set=train_set)
        if is_valid_contain_train:
            booster.set_train_data_name(train_data_name)
        for valid_set, name_valid_set in zip_(reduced_valid_sets, name_valid_sets):
            booster.add_valid(valid_set, name_valid_set)
    finally:
        train_set._reverse_update_params()
        for valid_set in reduced_valid_sets:
            valid_set._reverse_update_params()
    booster.best_iteration = 0

    # start training
    for i in range_(init_iteration, init_iteration + num_boost_round):
        for cb in callbacks_before_iter:
            cb(callback.CallbackEnv(model=booster,
                                    params=params,
                                    iteration=i,
                                    begin_iteration=init_iteration,
                                    end_iteration=init_iteration + num_boost_round,
                                    evaluation_result_list=None))

        booster.update(fobj=fobj)

        evaluation_result_list = []
        # check evaluation result.
        if valid_sets is not None:
            if is_valid_contain_train:
                evaluation_result_list.extend(booster.eval_train(feval))
            evaluation_result_list.extend(booster.eval_valid(feval))
        try:
            for cb in callbacks_after_iter:
                cb(callback.CallbackEnv(model=booster,
                                        params=params,
                                        iteration=i,
                                        begin_iteration=init_iteration,
                                        end_iteration=init_iteration + num_boost_round,
                                        evaluation_result_list=evaluation_result_list))
        except callback.EarlyStopException as earlyStopException:
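            # the exception carries a 0-based iteration index; best_iteration is stored 1-based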
            booster.best_iteration = earlyStopException.best_iteration + 1
            evaluation_result_list = earlyStopException.best_score
            break
    booster.best_score = collections.defaultdict(dict)
    for dataset_name, eval_name, score, _ in evaluation_result_list:
        booster.best_score[dataset_name][eval_name] = score
    if not keep_training_booster:
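        # serialize and reload the model to drop the link to the training data,
        # then free the Dataset so a lightweight prediction-only Booster is returned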
        booster.model_from_string(booster.model_to_string(), False).free_dataset()
    return booster


class _CVBooster(object):
    """Auxiliary data struct to hold all boosters of CV."""

    def __init__(self):
        self.boosters = []
        self.best_iteration = -1

    def append(self, booster):
        """Add a booster to _CVBooster."""
        self.boosters.append(booster)

    def __getattr__(self, name):
        """Redirect method calls to all boosters held by _CVBooster."""
        def handler_function(*args, **kwargs):
            """Call the method on each booster and collect the results in a list."""
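            # call the named method on every fold's booster and collect the per-fold results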
            ret = []
            for booster in self.boosters:
                ret.append(getattr(booster, name)(*args, **kwargs))
            return ret
        return handler_function


def _make_n_folds(full_data, folds, nfold, params, seed, fpreproc=None, stratified=True,
                  shuffle=True, eval_train_metric=False):
    """Make an n-fold list of Boosters from random indices."""
    full_data = full_data.construct()
    num_data = full_data.num_data()
    if folds is not None:
        if not hasattr(folds, '__iter__') and not hasattr(folds, 'split'):
            raise AttributeError("folds should be a generator or iterator of (train_idx, test_idx) tuples "
                                 "or scikit-learn splitter object with split method")
        if hasattr(folds, 'split'):
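            # scikit-learn splitters expect one group id per row; expand the per-query
            # group sizes if present, otherwise fall back to a single dummy group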
            group_info = full_data.get_group()
            if group_info is not None:
                group_info = group_info.astype(int)
                flatted_group = np.repeat(range_(len(group_info)), repeats=group_info)
            else:
                flatted_group = np.zeros(num_data, dtype=int)
            folds = folds.split(X=np.zeros(num_data), y=full_data.get_label(), groups=flatted_group)
    else:
        if 'objective' in params and params['objective'] == 'lambdarank':
            if not SKLEARN_INSTALLED:
                raise LightGBMError('Scikit-learn is required for lambdarank cv.')
            # lambdarank task, split according to groups
            group_info = full_data.get_group().astype(int)
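            # e.g. group sizes [2, 3] become row-level query ids [0, 0, 1, 1, 1]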
            flatted_group = np.repeat(range_(len(group_info)), repeats=group_info)
            group_kfold = _LGBMGroupKFold(n_splits=nfold)
            folds = group_kfold.split(X=np.zeros(num_data), groups=flatted_group)
        elif stratified:
            if not SKLEARN_INSTALLED:
                raise LightGBMError('Scikit-learn is required for stratified cv.')
            skf = _LGBMStratifiedKFold(n_splits=nfold, shuffle=shuffle, random_state=seed)
            folds = skf.split(X=np.zeros(num_data), y=full_data.get_label())
        else:
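            # plain k-fold: cut an (optionally shuffled) permutation of row indices into
            # nfold consecutive test slices; each fold trains on all remaining rows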
            if shuffle:
                randidx = np.random.RandomState(seed).permutation(num_data)
            else:
                randidx = np.arange(num_data)
            kstep = int(num_data / nfold)
            test_id = [randidx[i: i + kstep] for i in range_(0, num_data, kstep)]
            train_id = [np.concatenate([test_id[i] for i in range_(nfold) if k != i]) for k in range_(nfold)]
            folds = zip_(train_id, test_id)

    ret = _CVBooster()
    for train_idx, test_idx in folds:
        train_set = full_data.subset(train_idx)
        valid_set = full_data.subset(test_idx)
        # run preprocessing on the data set if needed
        if fpreproc is not None:
            train_set, valid_set, tparam = fpreproc(train_set, valid_set, params.copy())
        else:
            tparam = params
        cvbooster = Booster(tparam, train_set)
        if eval_train_metric:
            cvbooster.add_valid(train_set, 'train')
        cvbooster.add_valid(valid_set, 'valid')
        ret.append(cvbooster)
    return ret


def _agg_cv_result(raw_results, eval_train_metric=False):
    """Aggregate cross-validation results."""
    cvmap = collections.defaultdict(list)
    metric_type = {}
    for one_result in raw_results:
        for one_line in one_result:
            if eval_train_metric:
                key = "{} {}".format(one_line[0], one_line[1])
            else:
                key = one_line[1]
            metric_type[key] = one_line[3]
            cvmap[key].append(one_line[2])
    return [('cv_agg', k, np.mean(v), metric_type[k], np.std(v)) for k, v in cvmap.items()]


def cv(params, train_set, num_boost_round=100,
       folds=None, nfold=5, stratified=True, shuffle=True,
       metrics=None, fobj=None, feval=None, init_model=None,
       feature_name='auto', categorical_feature='auto',
       early_stopping_rounds=None, fpreproc=None,
       verbose_eval=None, show_stdv=True, seed=0,
       callbacks=None, eval_train_metric=False):
    """Perform the cross-validation with given parameters.

    Parameters
    ----------
    params : dict
        Parameters for Booster.
    train_set : Dataset
        Data to be trained on.
    num_boost_round : int, optional (default=100)
        Number of boosting iterations.
    folds : generator or iterator of (train_idx, test_idx) tuples, scikit-learn splitter object or None, optional (default=None)
        If generator or iterator, it should yield the train and test indices for each fold.
        If object, it should be one of the scikit-learn splitter classes
        (https://scikit-learn.org/stable/modules/classes.html#splitter-classes)
        and have a ``split`` method.
        This argument takes precedence over the other data split arguments.
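
        Example
        -------
        ``folds=sklearn.model_selection.KFold(n_splits=5, shuffle=True)``
        passes a plain scikit-learn splitter (an illustrative choice).
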
    nfold : int, optional (default=5)
        Number of folds in CV.
    stratified : bool, optional (default=True)
        Whether to perform stratified sampling.
    shuffle : bool, optional (default=True)
        Whether to shuffle before splitting data.
    metrics : string, list of strings or None, optional (default=None)
        Evaluation metrics to be monitored during CV.
        If not None, the metric in ``params`` will be overridden.
    fobj : callable or None, optional (default=None)
        Custom objective function.
    feval : callable or None, optional (default=None)
        Customized evaluation function.
        Should accept two parameters: preds, train_data,
        and return (eval_name, eval_result, is_higher_better) or list of such tuples.
        For multi-class task, preds are grouped by class_id first, then by row_id.
        To get the prediction for the i-th row and j-th class, access preds[j * num_data + i].
        To ignore the default metric corresponding to the used objective,
        set ``metrics`` to the string ``"None"``.
    init_model : string, Booster or None, optional (default=None)
        Filename of LightGBM model or Booster instance used to continue training.
    feature_name : list of strings or 'auto', optional (default="auto")
        Feature names.
        If 'auto' and data is pandas DataFrame, data column names are used.
    categorical_feature : list of strings or int, or 'auto', optional (default="auto")
        Categorical features.
        If list of int, interpreted as indices.
        If list of strings, interpreted as feature names (need to specify ``feature_name`` as well).
        If 'auto' and data is pandas DataFrame, pandas unordered categorical columns are used.
        All values in categorical features should be less than int32 max value (2147483647).
        Large values could be memory consuming. Consider using consecutive integers starting from zero.
        All negative values in categorical features will be treated as missing values.
    early_stopping_rounds : int or None, optional (default=None)
        Activates early stopping.
        CV score needs to improve at least every ``early_stopping_rounds`` round(s)
        to continue.
        Requires at least one metric. If there's more than one, will check all of them.
        To check only the first metric you can pass in ``callbacks``
        ``early_stopping`` callback with ``first_metric_only=True``.
        Last entry in evaluation history is the one from the best iteration.
    fpreproc : callable or None, optional (default=None)
        Preprocessing function that takes (dtrain, dtest, params)
        and returns transformed versions of those.
    verbose_eval : bool, int, or None, optional (default=None)
        Whether to display the progress.
        If None, progress will be displayed when np.ndarray is returned.
        If True, progress will be displayed at every boosting stage.
        If int, progress will be displayed at every given ``verbose_eval`` boosting stage.
    show_stdv : bool, optional (default=True)
        Whether to display the standard deviation in progress.
        Results are not affected by this parameter, and always contain std.
    seed : int, optional (default=0)
        Seed used to generate the folds (passed to numpy.random.seed).
    callbacks : list of callables or None, optional (default=None)
        List of callback functions that are applied at each iteration.
        See Callbacks in Python API for more information.
    eval_train_metric : bool, optional (default=False)
        Whether to display the train metric in progress.
        The score of the metric is calculated again after each training step, so there is some impact on performance.

    Returns
    -------
    eval_hist : dict
        Evaluation history.
        The dictionary has the following format:
        {'metric1-mean': [values], 'metric1-stdv': [values],
        'metric2-mean': [values], 'metric2-stdv': [values],
        ...}.
    """
    if not isinstance(train_set, Dataset):
        raise TypeError("Training only accepts Dataset object")
Guolin Ke's avatar
Guolin Ke committed
437

    params = copy.deepcopy(params)
    if fobj is not None:
        params['objective'] = 'none'
    for alias in ["num_iterations", "num_iteration", "n_iter", "num_tree", "num_trees",
                  "num_round", "num_rounds", "num_boost_round", "n_estimators"]:
        if alias in params:
            warnings.warn("Found `{}` in params. Will use it instead of argument".format(alias))
            num_boost_round = params.pop(alias)
            break
    for alias in ["early_stopping_round", "early_stopping_rounds", "early_stopping"]:
        if alias in params:
            warnings.warn("Found `{}` in params. Will use it instead of argument".format(alias))
            early_stopping_rounds = params.pop(alias)
            break

    if num_boost_round <= 0:
        raise ValueError("num_boost_round should be greater than zero.")
    if isinstance(init_model, string_type):
        predictor = _InnerPredictor(model_file=init_model, pred_parameter=params)
    elif isinstance(init_model, Booster):
        predictor = init_model._to_predictor(dict(init_model.params, **params))
    else:
        predictor = None
    train_set._update_params(params) \
             ._set_predictor(predictor) \
             .set_feature_name(feature_name) \
             .set_categorical_feature(categorical_feature)

    if metrics is not None:
        params['metric'] = metrics

    results = collections.defaultdict(list)
    cvfolds = _make_n_folds(train_set, folds=folds, nfold=nfold,
                            params=params, seed=seed, fpreproc=fpreproc,
                            stratified=stratified, shuffle=shuffle,
                            eval_train_metric=eval_train_metric)

    # setup callbacks
    if callbacks is None:
        callbacks = set()
    else:
        for i, cb in enumerate(callbacks):
            cb.__dict__.setdefault('order', i - len(callbacks))
        callbacks = set(callbacks)
    if early_stopping_rounds is not None:
        callbacks.add(callback.early_stopping(early_stopping_rounds, verbose=False))
    if verbose_eval is True:
        callbacks.add(callback.print_evaluation(show_stdv=show_stdv))
    elif isinstance(verbose_eval, integer_types):
        callbacks.add(callback.print_evaluation(verbose_eval, show_stdv=show_stdv))

    callbacks_before_iter = {cb for cb in callbacks if getattr(cb, 'before_iteration', False)}
    callbacks_after_iter = callbacks - callbacks_before_iter
    callbacks_before_iter = sorted(callbacks_before_iter, key=attrgetter('order'))
    callbacks_after_iter = sorted(callbacks_after_iter, key=attrgetter('order'))

    for i in range_(num_boost_round):
        for cb in callbacks_before_iter:
            cb(callback.CallbackEnv(model=cvfolds,
                                    params=params,
                                    iteration=i,
                                    begin_iteration=0,
                                    end_iteration=num_boost_round,
                                    evaluation_result_list=None))
        cvfolds.update(fobj=fobj)
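        # evaluate every fold and aggregate each metric into its mean and standard deviation across folds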
        res = _agg_cv_result(cvfolds.eval_valid(feval), eval_train_metric)
        for _, key, mean, _, std in res:
            results[key + '-mean'].append(mean)
            results[key + '-stdv'].append(std)
        try:
            for cb in callbacks_after_iter:
                cb(callback.CallbackEnv(model=cvfolds,
                                        params=params,
                                        iteration=i,
                                        begin_iteration=0,
                                        end_iteration=num_boost_round,
                                        evaluation_result_list=res))
        except callback.EarlyStopException as earlyStopException:
            cvfolds.best_iteration = earlyStopException.best_iteration + 1
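            # truncate the history so its last entry corresponds to the best iteration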
            for k in results:
                results[k] = results[k][:cvfolds.best_iteration]
            break
    return dict(results)