# coding: utf-8
"""Library with training routines of LightGBM."""
import collections
import copy
from operator import attrgetter
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import numpy as np

from . import callback
from .basic import Booster, Dataset, LightGBMError, _ArrayLike, _ConfigAliases, _InnerPredictor, _log_warning
from .compat import SKLEARN_INSTALLED, _LGBMGroupKFold, _LGBMStratifiedKFold

_LGBM_CustomObjectiveFunction = Callable[
    [np.ndarray, Dataset],
    Tuple[_ArrayLike, _ArrayLike]
]
_LGBM_CustomMetricFunction = Callable[
    [np.ndarray, Dataset],
    Tuple[str, float, bool]
]
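

# Illustrative sketches (not part of the library API): functions matching the
# two callable contracts above. With a custom objective, ``preds`` are raw
# margins; for multi-class tasks they are laid out class-major, so the score
# of row ``i`` in class ``j`` sits at ``preds[j * num_data + i]``.
def _example_binary_logloss_objective(preds: np.ndarray, train_data: Dataset) -> Tuple[np.ndarray, np.ndarray]:
    """Gradient and Hessian of binary log loss w.r.t. raw margins (sketch)."""
    labels = train_data.get_label()
    prob = 1.0 / (1.0 + np.exp(-preds))
    return prob - labels, prob * (1.0 - prob)


def _example_error_rate_metric(preds: np.ndarray, train_data: Dataset) -> Tuple[str, float, bool]:
    """(eval_name, eval_result, is_higher_better) for binary error rate (sketch, assumes raw-margin preds)."""
    labels = train_data.get_label()
    return 'error_rate', float(np.mean((preds > 0.0) != labels)), False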


def train(
    params: Dict[str, Any],
    train_set: Dataset,
    num_boost_round: int = 100,
    valid_sets: Optional[List[Dataset]] = None,
    valid_names: Optional[List[str]] = None,
    fobj: Optional[_LGBM_CustomObjectiveFunction] = None,
    feval: Optional[Union[_LGBM_CustomMetricFunction, List[_LGBM_CustomMetricFunction]]] = None,
    init_model: Optional[Union[str, Path, Booster]] = None,
    feature_name: Union[List[str], str] = 'auto',
    categorical_feature: Union[List[str], List[int], str] = 'auto',
    early_stopping_rounds: Optional[int] = None,
    evals_result: Optional[Dict[str, Any]] = None,
    verbose_eval: Union[bool, int, str] = 'warn',
    keep_training_booster: bool = False,
    callbacks: Optional[List[Callable]] = None
) -> Booster:
    """Perform the training with given parameters.

    Parameters
    ----------
    params : dict
        Parameters for training.
    train_set : Dataset
        Data to be trained on.
    num_boost_round : int, optional (default=100)
        Number of boosting iterations.
    valid_sets : list of Dataset, or None, optional (default=None)
        List of data to be evaluated on during training.
    valid_names : list of str, or None, optional (default=None)
        Names of ``valid_sets``.
    fobj : callable or None, optional (default=None)
        Customized objective function.
        Should accept two parameters: preds, train_data,
        and return (grad, hess).

            preds : numpy 1-D array
                The predicted values.
                Predicted values are returned before any transformation,
                e.g. they are raw margin instead of probability of positive class for binary task.
            train_data : Dataset
                The training dataset.
            grad : list, numpy 1-D array or pandas Series
                The value of the first order derivative (gradient) of the loss
                with respect to the elements of preds for each sample point.
            hess : list, numpy 1-D array or pandas Series
                The value of the second order derivative (Hessian) of the loss
                with respect to the elements of preds for each sample point.

        For multi-class task, preds are grouped by class_id first, then by row_id.
        If you want to get the i-th row preds in the j-th class, the access way is score[j * num_data + i],
        and you should group grad and hess in this way as well.

    feval : callable, list of callable, or None, optional (default=None)
        Customized evaluation function.
        Each evaluation function should accept two parameters: preds, train_data,
        and return (eval_name, eval_result, is_higher_better) or list of such tuples.

            preds : numpy 1-D array
                The predicted values.
                If ``fobj`` is specified, predicted values are returned before any transformation,
                e.g. they are raw margin instead of probability of positive class for binary task in this case.
            train_data : Dataset
                The training dataset.
            eval_name : str
                The name of evaluation function (without whitespace).
            eval_result : float
                The eval result.
            is_higher_better : bool
                Whether a higher eval result is better, e.g. AUC is ``is_higher_better``.

        For multi-class task, preds are grouped by class_id first, then by row_id.
        If you want to get the i-th row preds in the j-th class, the access way is preds[j * num_data + i].
        To ignore the default metric corresponding to the used objective,
        set the ``metric`` parameter to the string ``"None"`` in ``params``.
    init_model : str, pathlib.Path, Booster or None, optional (default=None)
        Filename of LightGBM model or Booster instance used to continue training.
    feature_name : list of str, or 'auto', optional (default="auto")
        Feature names.
        If 'auto' and data is pandas DataFrame, data column names are used.
    categorical_feature : list of str or int, or 'auto', optional (default="auto")
        Categorical features.
        If list of int, interpreted as indices.
        If list of str, interpreted as feature names (need to specify ``feature_name`` as well).
        If 'auto' and data is pandas DataFrame, pandas unordered categorical columns are used.
        All values in categorical features should be less than int32 max value (2147483647).
        Large values could be memory consuming. Consider using consecutive integers starting from zero.
        All negative values in categorical features will be treated as missing values.
        The output cannot be monotonically constrained with respect to a categorical feature.
    early_stopping_rounds : int or None, optional (default=None)
        Activates early stopping. The model will train until the validation score stops improving.
        Validation score needs to improve at least every ``early_stopping_rounds`` round(s)
        to continue training.
        Requires at least one validation set and one metric.
        If there's more than one, all of them will be checked, but the training data is ignored.
        To check only the first metric, set the ``first_metric_only`` parameter to ``True`` in ``params``.
        The index of iteration that has the best performance will be saved in the ``best_iteration`` field
        if early stopping logic is enabled by setting ``early_stopping_rounds``.
    evals_result : dict or None, optional (default=None)
        Dictionary used to store all evaluation results of all the items in ``valid_sets``.
        This should be initialized outside of your call to ``train()`` and should be empty.
        Any initial contents of the dictionary will be deleted.

        .. rubric:: Example

        With ``valid_sets`` = [valid_set, train_set],
        ``valid_names`` = ['eval', 'train']
        and ``params`` = {'metric': 'logloss'},
        ``evals_result`` will be {'train': {'logloss': ['0.48253', '0.35953', ...]},
        'eval': {'logloss': ['0.480385', '0.357756', ...]}}.

    verbose_eval : bool or int, optional (default=True)
        Requires at least one validation data.
        If True, the eval metric on the valid set is printed at each boosting stage.
        If int, the eval metric on the valid set is printed at every ``verbose_eval`` boosting stage.
        The last boosting stage or the boosting stage found by using ``early_stopping_rounds`` is also printed.

        .. rubric:: Example

        With ``verbose_eval`` = 4 and at least one item in ``valid_sets``,
        an evaluation metric is printed every 4 (instead of 1) boosting stages.

    keep_training_booster : bool, optional (default=False)
        Whether the returned Booster will be used to keep training.
        If False, the returned value will be converted into _InnerPredictor before returning.
        This means you won't be able to use ``eval``, ``eval_train`` or ``eval_valid`` methods of the returned Booster.
        When your model is very large and causes memory errors,
        you can try to set this param to ``True`` to avoid the model conversion performed during the internal call of ``model_to_string``.
        You can still use _InnerPredictor as ``init_model`` to continue training later.
    callbacks : list of callable, or None, optional (default=None)
        List of callback functions that are applied at each iteration.
        See Callbacks in Python API for more information.

    Returns
    -------
    booster : Booster
        The trained Booster model.
    """
    # create predictor first
    params = copy.deepcopy(params)
    if fobj is not None:
        for obj_alias in _ConfigAliases.get("objective"):
            params.pop(obj_alias, None)
        params['objective'] = 'none'
    for alias in _ConfigAliases.get("num_iterations"):
        if alias in params:
            num_boost_round = params.pop(alias)
            _log_warning(f"Found `{alias}` in params. Will use it instead of argument")
    params["num_iterations"] = num_boost_round
    # show deprecation warning only for early stop argument, setting early stop via global params should still be possible
    if early_stopping_rounds is not None and early_stopping_rounds > 0:
        _log_warning("'early_stopping_rounds' argument is deprecated and will be removed in a future release of LightGBM. "
                     "Pass 'early_stopping()' callback via 'callbacks' argument instead.")
    for alias in _ConfigAliases.get("early_stopping_round"):
        if alias in params:
            early_stopping_rounds = params.pop(alias)
    params["early_stopping_round"] = early_stopping_rounds
    first_metric_only = params.get('first_metric_only', False)

    if num_boost_round <= 0:
        raise ValueError("num_boost_round should be greater than zero.")
    predictor: Optional[_InnerPredictor] = None
    if isinstance(init_model, (str, Path)):
        predictor = _InnerPredictor(model_file=init_model, pred_parameter=params)
    elif isinstance(init_model, Booster):
        predictor = init_model._to_predictor(dict(init_model.params, **params))
    init_iteration = predictor.num_total_iteration if predictor is not None else 0
    # check dataset
    if not isinstance(train_set, Dataset):
        raise TypeError("Training only accepts Dataset object")

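    # bind params, the predictor from init_model, and feature settings to the training Dataset before construction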
    train_set._update_params(params) \
             ._set_predictor(predictor) \
             .set_feature_name(feature_name) \
             .set_categorical_feature(categorical_feature)

    is_valid_contain_train = False
    train_data_name = "training"
    reduced_valid_sets = []
    name_valid_sets = []
    if valid_sets is not None:
        if isinstance(valid_sets, Dataset):
            valid_sets = [valid_sets]
        if isinstance(valid_names, str):
            valid_names = [valid_names]
        for i, valid_data in enumerate(valid_sets):
            # reduce cost: reuse the training data for its own evaluation instead of adding a separate valid set
            if valid_data is train_set:
                is_valid_contain_train = True
                if valid_names is not None:
                    train_data_name = valid_names[i]
                continue
            if not isinstance(valid_data, Dataset):
                raise TypeError("Training only accepts Dataset object")
            reduced_valid_sets.append(valid_data._update_params(params).set_reference(train_set))
            if valid_names is not None and len(valid_names) > i:
                name_valid_sets.append(valid_names[i])
            else:
                name_valid_sets.append(f'valid_{i}')
    # process callbacks
    if callbacks is None:
        callbacks_set = set()
    else:
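        # default user callbacks to a negative 'order' so they sort ahead of the built-in callbacks added below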
        for i, cb in enumerate(callbacks):
            cb.__dict__.setdefault('order', i - len(callbacks))
        callbacks_set = set(callbacks)

    # most legacy advanced options have become callbacks
    if verbose_eval != "warn":
        _log_warning("'verbose_eval' argument is deprecated and will be removed in a future release of LightGBM. "
                     "Pass 'log_evaluation()' callback via 'callbacks' argument instead.")
    else:
        if callbacks_set:  # assume user has already specified log_evaluation callback
            verbose_eval = False
        else:
            verbose_eval = True
    if verbose_eval is True:
        callbacks_set.add(callback.log_evaluation())
    elif isinstance(verbose_eval, int):
        callbacks_set.add(callback.log_evaluation(verbose_eval))

    if early_stopping_rounds is not None and early_stopping_rounds > 0:
        callbacks_set.add(callback.early_stopping(early_stopping_rounds, first_metric_only, verbose=bool(verbose_eval)))

    if evals_result is not None:
        _log_warning("'evals_result' argument is deprecated and will be removed in a future release of LightGBM. "
                     "Pass 'record_evaluation()' callback via 'callbacks' argument instead.")
        callbacks_set.add(callback.record_evaluation(evals_result))

    callbacks_before_iter_set = {cb for cb in callbacks_set if getattr(cb, 'before_iteration', False)}
    callbacks_after_iter_set = callbacks_set - callbacks_before_iter_set
    callbacks_before_iter = sorted(callbacks_before_iter_set, key=attrgetter('order'))
    callbacks_after_iter = sorted(callbacks_after_iter_set, key=attrgetter('order'))

    # construct booster
    try:
        booster = Booster(params=params, train_set=train_set)
        if is_valid_contain_train:
            booster.set_train_data_name(train_data_name)
        for valid_set, name_valid_set in zip(reduced_valid_sets, name_valid_sets):
            booster.add_valid(valid_set, name_valid_set)
    finally:
        train_set._reverse_update_params()
        for valid_set in reduced_valid_sets:
            valid_set._reverse_update_params()
    booster.best_iteration = 0

    # start training
    for i in range(init_iteration, init_iteration + num_boost_round):
        for cb in callbacks_before_iter:
            cb(callback.CallbackEnv(model=booster,
                                    params=params,
                                    iteration=i,
                                    begin_iteration=init_iteration,
                                    end_iteration=init_iteration + num_boost_round,
                                    evaluation_result_list=None))

        booster.update(fobj=fobj)

        evaluation_result_list = []
        # check evaluation result.
        if valid_sets is not None:
            if is_valid_contain_train:
                evaluation_result_list.extend(booster.eval_train(feval))
            evaluation_result_list.extend(booster.eval_valid(feval))
        try:
            for cb in callbacks_after_iter:
                cb(callback.CallbackEnv(model=booster,
                                        params=params,
                                        iteration=i,
                                        begin_iteration=init_iteration,
                                        end_iteration=init_iteration + num_boost_round,
                                        evaluation_result_list=evaluation_result_list))
        except callback.EarlyStopException as earlyStopException:
            booster.best_iteration = earlyStopException.best_iteration + 1
            evaluation_result_list = earlyStopException.best_score
            break
    booster.best_score = collections.defaultdict(collections.OrderedDict)
    for dataset_name, eval_name, score, _ in evaluation_result_list:
        booster.best_score[dataset_name][eval_name] = score
    if not keep_training_booster:
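        # round-trip the model through its string representation and free the Dataset references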
        booster.model_from_string(booster.model_to_string()).free_dataset()
    return booster
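

# A minimal usage sketch (illustrative, not part of the library): the
# callback-based replacements for the deprecated 'early_stopping_rounds',
# 'verbose_eval' and 'evals_result' arguments. ``train_data`` and
# ``valid_data`` are assumed to be pre-built Dataset objects.
def _example_train_with_callbacks(train_data: Dataset, valid_data: Dataset) -> Booster:
    eval_results: Dict[str, Any] = {}
    return train(
        params={'objective': 'binary', 'metric': 'binary_logloss'},
        train_set=train_data,
        num_boost_round=100,
        valid_sets=[valid_data],
        callbacks=[
            callback.early_stopping(stopping_rounds=10),
            callback.log_evaluation(period=5),
            callback.record_evaluation(eval_results),
        ],
    )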


class CVBooster:
    """CVBooster in LightGBM.

    Auxiliary data structure to hold and redirect all boosters of ``cv`` function.
    This class has the same methods as Booster class.
    All method calls are dispatched to the underlying Boosters, and their results are returned in a list.

    Attributes
    ----------
    boosters : list of Booster
        The list of underlying fitted models.
    best_iteration : int
        The best iteration of fitted model.
    """

    def __init__(self):
        """Initialize the CVBooster.

        Generally, no need to instantiate manually.
        """
        self.boosters = []
        self.best_iteration = -1

    def _append(self, booster):
        """Add a booster to CVBooster."""
        self.boosters.append(booster)

    def __getattr__(self, name):
        """Redirect method calls of CVBooster."""
        def handler_function(*args, **kwargs):
            """Call the method with each booster and return their results in a list."""
            ret = []
            for booster in self.boosters:
                ret.append(getattr(booster, name)(*args, **kwargs))
            return ret
        return handler_function
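

# Illustrative sketch (not part of the library): because ``__getattr__`` fans
# calls out to each fold's Booster, any Booster method invoked on a CVBooster
# returns a list with one entry per fold. ``X`` is assumed to be a feature matrix.
def _example_cvbooster_predict(cvbooster: CVBooster, X: np.ndarray) -> List[np.ndarray]:
    return cvbooster.predict(X)  # list of per-fold prediction arrays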


def _make_n_folds(full_data, folds, nfold, params, seed, fpreproc=None, stratified=True,
                  shuffle=True, eval_train_metric=False):
    """Make an n-fold list of Boosters from random indices."""
    full_data = full_data.construct()
    num_data = full_data.num_data()
    if folds is not None:
        if not hasattr(folds, '__iter__') and not hasattr(folds, 'split'):
            raise AttributeError("folds should be a generator or iterator of (train_idx, test_idx) tuples "
                                 "or scikit-learn splitter object with split method")
        if hasattr(folds, 'split'):
            group_info = full_data.get_group()
            if group_info is not None:
                group_info = np.array(group_info, dtype=np.int32, copy=False)
                flatted_group = np.repeat(range(len(group_info)), repeats=group_info)
            else:
                flatted_group = np.zeros(num_data, dtype=np.int32)
            folds = folds.split(X=np.empty(num_data), y=full_data.get_label(), groups=flatted_group)
    else:
        if any(params.get(obj_alias, "") in {"lambdarank", "rank_xendcg", "xendcg",
                                             "xe_ndcg", "xe_ndcg_mart", "xendcg_mart"}
               for obj_alias in _ConfigAliases.get("objective")):
            if not SKLEARN_INSTALLED:
                raise LightGBMError('scikit-learn is required for ranking cv')
            # ranking task, split according to groups
            group_info = np.array(full_data.get_group(), dtype=np.int32, copy=False)
            flatted_group = np.repeat(range(len(group_info)), repeats=group_info)
            group_kfold = _LGBMGroupKFold(n_splits=nfold)
            folds = group_kfold.split(X=np.empty(num_data), groups=flatted_group)
        elif stratified:
            if not SKLEARN_INSTALLED:
                raise LightGBMError('scikit-learn is required for stratified cv')
            skf = _LGBMStratifiedKFold(n_splits=nfold, shuffle=shuffle, random_state=seed)
            folds = skf.split(X=np.empty(num_data), y=full_data.get_label())
        else:
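            # plain k-fold: slice a (shuffled) permutation of the row indices into nfold contiguous chunks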
            if shuffle:
                randidx = np.random.RandomState(seed).permutation(num_data)
            else:
                randidx = np.arange(num_data)
            kstep = int(num_data / nfold)
            test_id = [randidx[i: i + kstep] for i in range(0, num_data, kstep)]
            train_id = [np.concatenate([test_id[i] for i in range(nfold) if k != i]) for k in range(nfold)]
            folds = zip(train_id, test_id)

    ret = CVBooster()
    for train_idx, test_idx in folds:
        train_set = full_data.subset(sorted(train_idx))
        valid_set = full_data.subset(sorted(test_idx))
        # run preprocessing on the data set if needed
        if fpreproc is not None:
            train_set, valid_set, tparam = fpreproc(train_set, valid_set, params.copy())
        else:
            tparam = params
        cvbooster = Booster(tparam, train_set)
        if eval_train_metric:
            cvbooster.add_valid(train_set, 'train')
        cvbooster.add_valid(valid_set, 'valid')
        ret._append(cvbooster)
    return ret
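

# Worked sketch of the group flattening used above: ranking-style group sizes
# are expanded with np.repeat into one group id per row, so GroupKFold keeps
# all rows of a query in the same fold. E.g. sizes [2, 3] become [0, 0, 1, 1, 1].
def _example_flatten_groups(group_sizes: np.ndarray) -> np.ndarray:
    return np.repeat(range(len(group_sizes)), repeats=group_sizes)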


def _agg_cv_result(raw_results, eval_train_metric=False):
    """Aggregate cross-validation results."""
    cvmap = collections.OrderedDict()
    metric_type = {}
    for one_result in raw_results:
        for one_line in one_result:
            if eval_train_metric:
                key = f"{one_line[0]} {one_line[1]}"
            else:
                key = one_line[1]
            metric_type[key] = one_line[3]
            cvmap.setdefault(key, [])
            cvmap[key].append(one_line[2])
    return [('cv_agg', k, np.mean(v), metric_type[k], np.std(v)) for k, v in cvmap.items()]
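

# Aggregation sketch (illustrative): two folds that each produce one eval line
# collapse into a single ('cv_agg', metric, mean, is_higher_better, stdv) tuple.
def _example_agg_cv_result() -> None:
    folds_eval = [
        [('valid', 'auc', 0.80, True)],
        [('valid', 'auc', 0.90, True)],
    ]
    # -> [('cv_agg', 'auc', 0.85, True, 0.05)] (up to float rounding)
    print(_agg_cv_result(folds_eval))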


def cv(params, train_set, num_boost_round=100,
       folds=None, nfold=5, stratified=True, shuffle=True,
       metrics=None, fobj=None, feval=None, init_model=None,
       feature_name='auto', categorical_feature='auto',
       early_stopping_rounds=None, fpreproc=None,
       verbose_eval=None, show_stdv=True, seed=0,
       callbacks=None, eval_train_metric=False,
       return_cvbooster=False):
    """Perform the cross-validation with given parameters.

    Parameters
    ----------
    params : dict
        Parameters for Booster.
    train_set : Dataset
        Data to be trained on.
    num_boost_round : int, optional (default=100)
        Number of boosting iterations.
    folds : generator or iterator of (train_idx, test_idx) tuples, scikit-learn splitter object or None, optional (default=None)
        If generator or iterator, it should yield the train and test indices for each fold.
        If object, it should be one of the scikit-learn splitter classes
        (https://scikit-learn.org/stable/modules/classes.html#splitter-classes)
        and have ``split`` method.
        This argument takes precedence over the other data-split arguments.
    nfold : int, optional (default=5)
        Number of folds in CV.
    stratified : bool, optional (default=True)
        Whether to perform stratified sampling.
    shuffle : bool, optional (default=True)
        Whether to shuffle before splitting data.
    metrics : str, list of str, or None, optional (default=None)
        Evaluation metrics to be monitored during CV.
        If not None, the metric in ``params`` will be overridden.
    fobj : callable or None, optional (default=None)
        Customized objective function.
        Should accept two parameters: preds, train_data,
        and return (grad, hess).

            preds : numpy 1-D array
                The predicted values.
                Predicted values are returned before any transformation,
                e.g. they are raw margin instead of probability of positive class for binary task.
            train_data : Dataset
                The training dataset.
            grad : list, numpy 1-D array or pandas Series
                The value of the first order derivative (gradient) of the loss
                with respect to the elements of preds for each sample point.
            hess : list, numpy 1-D array or pandas Series
                The value of the second order derivative (Hessian) of the loss
                with respect to the elements of preds for each sample point.

        For multi-class task, preds are grouped by class_id first, then by row_id.
        If you want to get the i-th row preds in the j-th class, the access way is score[j * num_data + i],
        and you should group grad and hess in this way as well.

    feval : callable, list of callable, or None, optional (default=None)
        Customized evaluation function.
        Each evaluation function should accept two parameters: preds, train_data,
        and return (eval_name, eval_result, is_higher_better) or list of such tuples.

            preds : numpy 1-D array
                The predicted values.
                If ``fobj`` is specified, predicted values are returned before any transformation,
                e.g. they are raw margin instead of probability of positive class for binary task in this case.
            train_data : Dataset
                The training dataset.
            eval_name : str
                The name of evaluation function (without whitespace).
            eval_result : float
                The eval result.
            is_higher_better : bool
                Whether a higher eval result is better, e.g. AUC is ``is_higher_better``.

        For multi-class task, preds are grouped by class_id first, then by row_id.
        If you want to get the i-th row preds in the j-th class, the access way is preds[j * num_data + i].
        To ignore the default metric corresponding to the used objective,
        set ``metrics`` to the string ``"None"``.
    init_model : str, pathlib.Path, Booster or None, optional (default=None)
        Filename of LightGBM model or Booster instance used to continue training.
    feature_name : list of str, or 'auto', optional (default="auto")
        Feature names.
        If 'auto' and data is pandas DataFrame, data column names are used.
    categorical_feature : list of str or int, or 'auto', optional (default="auto")
        Categorical features.
        If list of int, interpreted as indices.
        If list of str, interpreted as feature names (need to specify ``feature_name`` as well).
        If 'auto' and data is pandas DataFrame, pandas unordered categorical columns are used.
        All values in categorical features should be less than int32 max value (2147483647).
        Large values could be memory consuming. Consider using consecutive integers starting from zero.
        All negative values in categorical features will be treated as missing values.
        The output cannot be monotonically constrained with respect to a categorical feature.
    early_stopping_rounds : int or None, optional (default=None)
        Activates early stopping.
        CV score needs to improve at least every ``early_stopping_rounds`` round(s)
        to continue.
        Requires at least one metric. If there's more than one, all of them will be checked.
        To check only the first metric, set the ``first_metric_only`` parameter to ``True`` in ``params``.
        Last entry in evaluation history is the one from the best iteration.
    fpreproc : callable or None, optional (default=None)
        Preprocessing function that takes (dtrain, dtest, params)
        and returns transformed versions of those.
    verbose_eval : bool, int, or None, optional (default=None)
        Whether to display the progress.
        If True, progress will be displayed at every boosting stage.
        If int, progress will be displayed at every given ``verbose_eval`` boosting stage.
    show_stdv : bool, optional (default=True)
        Whether to display the standard deviation in progress.
        Results are not affected by this parameter, and always contain the std.
    seed : int, optional (default=0)
        Seed used to generate the folds (passed to numpy.random.seed).
    callbacks : list of callable, or None, optional (default=None)
        List of callback functions that are applied at each iteration.
        See Callbacks in Python API for more information.
    eval_train_metric : bool, optional (default=False)
        Whether to display the train metric in progress.
        The score of the metric is calculated again after each training step, so there is some impact on performance.
    return_cvbooster : bool, optional (default=False)
        Whether to return Booster models trained on each fold through ``CVBooster``.
    Returns
    -------
    eval_hist : dict
        Evaluation history.
        The dictionary has the following format:
        {'metric1-mean': [values], 'metric1-stdv': [values],
        'metric2-mean': [values], 'metric2-stdv': [values],
        ...}.
        If ``return_cvbooster=True``, also returns trained boosters via ``cvbooster`` key.
    """
    if not isinstance(train_set, Dataset):
        raise TypeError("Training only accepts Dataset object")

    params = copy.deepcopy(params)
    if fobj is not None:
        for obj_alias in _ConfigAliases.get("objective"):
            params.pop(obj_alias, None)
        params['objective'] = 'none'
    for alias in _ConfigAliases.get("num_iterations"):
        if alias in params:
            _log_warning(f"Found '{alias}' in params. Will use it instead of 'num_boost_round' argument")
            num_boost_round = params.pop(alias)
    params["num_iterations"] = num_boost_round
    if early_stopping_rounds is not None and early_stopping_rounds > 0:
        _log_warning("'early_stopping_rounds' argument is deprecated and will be removed in a future release of LightGBM. "
                     "Pass 'early_stopping()' callback via 'callbacks' argument instead.")
    for alias in _ConfigAliases.get("early_stopping_round"):
        if alias in params:
            early_stopping_rounds = params.pop(alias)
    params["early_stopping_round"] = early_stopping_rounds
    first_metric_only = params.get('first_metric_only', False)

    if num_boost_round <= 0:
        raise ValueError("num_boost_round should be greater than zero.")
    if isinstance(init_model, (str, Path)):
        predictor = _InnerPredictor(model_file=init_model, pred_parameter=params)
    elif isinstance(init_model, Booster):
        predictor = init_model._to_predictor(dict(init_model.params, **params))
    else:
        predictor = None

    if metrics is not None:
        for metric_alias in _ConfigAliases.get("metric"):
            params.pop(metric_alias, None)
        params['metric'] = metrics

    train_set._update_params(params) \
             ._set_predictor(predictor) \
             .set_feature_name(feature_name) \
             .set_categorical_feature(categorical_feature)

    results = collections.defaultdict(list)
    cvfolds = _make_n_folds(train_set, folds=folds, nfold=nfold,
                            params=params, seed=seed, fpreproc=fpreproc,
                            stratified=stratified, shuffle=shuffle,
                            eval_train_metric=eval_train_metric)

    # setup callbacks
    if callbacks is None:
        callbacks = set()
    else:
        for i, cb in enumerate(callbacks):
            cb.__dict__.setdefault('order', i - len(callbacks))
        callbacks = set(callbacks)
    if early_stopping_rounds is not None and early_stopping_rounds > 0:
        callbacks.add(callback.early_stopping(early_stopping_rounds, first_metric_only, verbose=False))
    if verbose_eval is not None:
        _log_warning("'verbose_eval' argument is deprecated and will be removed in a future release of LightGBM. "
                     "Pass 'log_evaluation()' callback via 'callbacks' argument instead.")
    if verbose_eval is True:
        callbacks.add(callback.log_evaluation(show_stdv=show_stdv))
    elif isinstance(verbose_eval, int):
        callbacks.add(callback.log_evaluation(verbose_eval, show_stdv=show_stdv))

    callbacks_before_iter = {cb for cb in callbacks if getattr(cb, 'before_iteration', False)}
    callbacks_after_iter = callbacks - callbacks_before_iter
    callbacks_before_iter = sorted(callbacks_before_iter, key=attrgetter('order'))
    callbacks_after_iter = sorted(callbacks_after_iter, key=attrgetter('order'))

    for i in range(num_boost_round):
        for cb in callbacks_before_iter:
            cb(callback.CallbackEnv(model=cvfolds,
                                    params=params,
                                    iteration=i,
                                    begin_iteration=0,
                                    end_iteration=num_boost_round,
                                    evaluation_result_list=None))
        cvfolds.update(fobj=fobj)
        res = _agg_cv_result(cvfolds.eval_valid(feval), eval_train_metric)
        for _, key, mean, _, std in res:
            results[f'{key}-mean'].append(mean)
            results[f'{key}-stdv'].append(std)
        try:
            for cb in callbacks_after_iter:
                cb(callback.CallbackEnv(model=cvfolds,
                                        params=params,
                                        iteration=i,
                                        begin_iteration=0,
                                        end_iteration=num_boost_round,
                                        evaluation_result_list=res))
        except callback.EarlyStopException as earlyStopException:
            cvfolds.best_iteration = earlyStopException.best_iteration + 1
            for k in results:
                results[k] = results[k][:cvfolds.best_iteration]
            break

    if return_cvbooster:
        results['cvbooster'] = cvfolds

    return dict(results)
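

# A minimal usage sketch (illustrative, not part of the library): 5-fold
# stratified CV with early stopping supplied as a callback and the per-fold
# boosters returned. ``train_data`` is assumed to be a pre-built Dataset.
def _example_cv(train_data: Dataset) -> Dict[str, Any]:
    return cv(
        params={'objective': 'binary', 'metric': 'auc'},
        train_set=train_data,
        num_boost_round=100,
        nfold=5,
        stratified=True,
        callbacks=[callback.early_stopping(stopping_rounds=10)],
        return_cvbooster=True,
    )  # e.g. {'auc-mean': [...], 'auc-stdv': [...], 'cvbooster': CVBooster}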