"tests/git@developer.sourcefind.cn:tianlh/lightgbm-dcu.git" did not exist on "2a00b6ffbc979fcbe68a8485aaa78d80005b163b"
engine.py 35.1 KB
Newer Older
wxchan's avatar
wxchan committed
1
# coding: utf-8
"""Library with training routines of LightGBM."""

import copy
import json
from collections import OrderedDict, defaultdict
from operator import attrgetter
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union

import numpy as np

from . import callback
from .basic import (
    Booster,
    Dataset,
    LightGBMError,
    _choose_param_value,
    _ConfigAliases,
    _InnerPredictor,
    _LGBM_BoosterEvalMethodResultType,
    _LGBM_BoosterEvalMethodResultWithStandardDeviationType,
    _LGBM_CustomObjectiveFunction,
    _LGBM_EvalFunctionResultType,
    _log_warning,
)
from .compat import SKLEARN_INSTALLED, _LGBMBaseCrossValidator, _LGBMGroupKFold, _LGBMStratifiedKFold

__all__ = [
    "cv",
    "CVBooster",
    "train",
]


_LGBM_CustomMetricFunction = Union[
    Callable[
        [np.ndarray, Dataset],
        _LGBM_EvalFunctionResultType,
    ],
    Callable[
        [np.ndarray, Dataset],
        List[_LGBM_EvalFunctionResultType],
    ],
]

_LGBM_PreprocFunction = Callable[
    [Dataset, Dataset, Dict[str, Any]],
    Tuple[Dataset, Dataset, Dict[str, Any]],
]
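
# Illustrative sketch (not part of the library API): example signatures that satisfy the two
# aliases above. ``mean_absolute_error`` is a hypothetical custom metric (``feval``) and
# ``rescale_params`` a hypothetical ``fpreproc`` hook; both names are illustrative only.
#
#     def mean_absolute_error(preds: np.ndarray, eval_data: Dataset) -> _LGBM_EvalFunctionResultType:
#         # return (eval_name, eval_result, is_higher_better)
#         return "mae", float(np.mean(np.abs(preds - eval_data.get_label()))), False
#
#     def rescale_params(dtrain: Dataset, dtest: Dataset, params: Dict[str, Any]):
#         # return possibly modified (dtrain, dtest, params) for one CV fold
#         return dtrain, dtest, {**params, "verbosity": -1}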


def _choose_num_iterations(*, num_boost_round_kwarg: int, params: Dict[str, Any]) -> Dict[str, Any]:
    """Choose number of boosting rounds.

    In ``train()`` and ``cv()``, there are multiple ways to provide configuration for
    the number of boosting rounds to perform:

      * the ``num_boost_round`` keyword argument
      * ``num_iterations`` or any of its aliases via the ``params`` dictionary

    These should be preferred in the following order (first one found wins):

      1. ``num_iterations`` provided via ``params`` (because it's the main parameter name)
      2. any other aliases of ``num_iterations`` provided via ``params``
      3. the ``num_boost_round`` keyword argument

    This function handles that choice and issues helpful warnings in cases where the
    result might be surprising.

    Returns
    -------
    params : dict
        Parameters, with ``"num_iterations"`` set to the preferred value and all other
        aliases of ``num_iterations`` removed.
    """
    num_iteration_configs_provided = {
        alias: params[alias] for alias in _ConfigAliases.get("num_iterations") if alias in params
    }

    # now that the relevant information has been pulled out of params, it's safe to overwrite it
    # with the content that should be used for training (i.e. with aliases resolved)
    params = _choose_param_value(
        main_param_name="num_iterations",
        params=params,
        default_value=num_boost_round_kwarg,
    )

    # if there were not multiple boosting rounds configurations provided in params,
    # then by definition they cannot have conflicting values... no need to warn
    if len(num_iteration_configs_provided) <= 1:
        return params

    # if all the aliases have the same value, no need to warn
    if len(set(num_iteration_configs_provided.values())) <= 1:
        return params

    # if this line is reached, lightgbm should warn
    value_string = ", ".join(f"{alias}={val}" for alias, val in num_iteration_configs_provided.items())
    _log_warning(
        f"Found conflicting values for num_iterations provided via 'params': {value_string}. "
        f"LightGBM will perform up to {params['num_iterations']} boosting rounds. "
        "To be confident in the maximum number of boosting rounds LightGBM will perform and to "
        "suppress this warning, modify 'params' so that only one of those is present."
    )
    return params
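
# Illustrative sketch (not part of the library): how the precedence described above resolves
# for a hypothetical ``params`` dict, assuming ``n_estimators`` is registered as an alias of
# ``num_iterations`` in ``_ConfigAliases``.
#
#     params = _choose_num_iterations(
#         num_boost_round_kwarg=100,
#         params={"n_estimators": 50, "num_iterations": 200},
#     )
#     # -> params == {"num_iterations": 200}; the other alias is removed and a warning is
#     #    logged because the two aliases carried conflicting values.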


def train(
    params: Dict[str, Any],
    train_set: Dataset,
    num_boost_round: int = 100,
    valid_sets: Optional[List[Dataset]] = None,
    valid_names: Optional[List[str]] = None,
    feval: Optional[Union[_LGBM_CustomMetricFunction, List[_LGBM_CustomMetricFunction]]] = None,
    init_model: Optional[Union[str, Path, Booster]] = None,
    keep_training_booster: bool = False,
    callbacks: Optional[List[Callable]] = None,
) -> Booster:
    """Perform the training with given parameters.

    Parameters
    ----------
    params : dict
        Parameters for training. Values passed through ``params`` take precedence over those
        supplied via arguments.
    train_set : Dataset
        Data to be trained on.
    num_boost_round : int, optional (default=100)
        Number of boosting iterations.
    valid_sets : list of Dataset, or None, optional (default=None)
        List of data to be evaluated on during training.
    valid_names : list of str, or None, optional (default=None)
        Names of ``valid_sets``.
    feval : callable, list of callable, or None, optional (default=None)
        Customized evaluation function.
        Each evaluation function should accept two parameters: preds, eval_data,
        and return (eval_name, eval_result, is_higher_better) or list of such tuples.

            preds : numpy 1-D array or numpy 2-D array (for multi-class task)
                The predicted values.
                For multi-class task, preds are numpy 2-D array of shape = [n_samples, n_classes].
                If custom objective function is used, predicted values are returned before any transformation,
                e.g. they are raw margin instead of probability of positive class for binary task in this case.
            eval_data : Dataset
                A ``Dataset`` to evaluate.
            eval_name : str
                The name of evaluation function (without whitespaces).
            eval_result : float
                The eval result.
            is_higher_better : bool
                Is eval result higher better, e.g. AUC is ``is_higher_better``.

        To ignore the default metric corresponding to the used objective,
        set the ``metric`` parameter to the string ``"None"`` in ``params``.
    init_model : str, pathlib.Path, Booster or None, optional (default=None)
        Filename of LightGBM model or Booster instance used to continue training.
    keep_training_booster : bool, optional (default=False)
        Whether the returned Booster will be used to keep training.
        If False, the returned value will be converted into _InnerPredictor before returning.
        This means you won't be able to use ``eval``, ``eval_train`` or ``eval_valid`` methods of the returned Booster.
        When your model is very large and causes memory errors,
        you can try to set this param to ``True`` to avoid the model conversion performed during the internal call of ``model_to_string``.
        You can still use _InnerPredictor as ``init_model`` to continue training later.
    callbacks : list of callable, or None, optional (default=None)
        List of callback functions that are applied at each iteration.
        See Callbacks in Python API for more information.

    Note
    ----
    A custom objective function can be provided for the ``objective`` parameter.
    It should accept two parameters: preds, train_data and return (grad, hess).

        preds : numpy 1-D array or numpy 2-D array (for multi-class task)
            The predicted values.
            Predicted values are returned before any transformation,
            e.g. they are raw margin instead of probability of positive class for binary task.
        train_data : Dataset
            The training dataset.
        grad : numpy 1-D array or numpy 2-D array (for multi-class task)
            The value of the first order derivative (gradient) of the loss
            with respect to the elements of preds for each sample point.
        hess : numpy 1-D array or numpy 2-D array (for multi-class task)
            The value of the second order derivative (Hessian) of the loss
            with respect to the elements of preds for each sample point.

    For multi-class task, preds are numpy 2-D array of shape = [n_samples, n_classes],
    and grad and hess should be returned in the same format.

    Returns
    -------
    booster : Booster
        The trained Booster model.
    """
    if not isinstance(train_set, Dataset):
        raise TypeError(f"train() only accepts Dataset object, train_set has type '{type(train_set).__name__}'.")

    if isinstance(valid_sets, list):
        for i, valid_item in enumerate(valid_sets):
            if not isinstance(valid_item, Dataset):
                raise TypeError(
                    "Every item in valid_sets must be a Dataset object. "
                    f"Item {i} has type '{type(valid_item).__name__}'."
                )

    # create predictor first
    params = copy.deepcopy(params)
    params = _choose_param_value(
        main_param_name="objective",
        params=params,
        default_value=None,
    )
    fobj: Optional[_LGBM_CustomObjectiveFunction] = None
    if callable(params["objective"]):
        fobj = params["objective"]
        params["objective"] = "none"

    params = _choose_num_iterations(num_boost_round_kwarg=num_boost_round, params=params)
    num_boost_round = params["num_iterations"]
    if num_boost_round <= 0:
        raise ValueError(f"Number of boosting rounds must be greater than 0. Got {num_boost_round}.")

    # setting early stopping via global params should be possible
    params = _choose_param_value(
        main_param_name="early_stopping_round",
        params=params,
        default_value=None,
    )
    if params["early_stopping_round"] is None:
        params.pop("early_stopping_round")
    first_metric_only = params.get("first_metric_only", False)

    predictor: Optional[_InnerPredictor] = None
    if isinstance(init_model, (str, Path)):
        predictor = _InnerPredictor.from_model_file(model_file=init_model, pred_parameter=params)
    elif isinstance(init_model, Booster):
        predictor = _InnerPredictor.from_booster(booster=init_model, pred_parameter=dict(init_model.params, **params))

    if predictor is not None:
        init_iteration = predictor.current_iteration()
    else:
        init_iteration = 0

    train_set._update_params(params)._set_predictor(predictor)

    is_valid_contain_train = False
    train_data_name = "training"
    reduced_valid_sets = []
    name_valid_sets = []
    if valid_sets is not None:
        if isinstance(valid_sets, Dataset):
            valid_sets = [valid_sets]
        if isinstance(valid_names, str):
            valid_names = [valid_names]
        for i, valid_data in enumerate(valid_sets):
            # avoid the cost of re-predicting on the training data
            if valid_data is train_set:
                is_valid_contain_train = True
                if valid_names is not None:
                    train_data_name = valid_names[i]
                continue
            reduced_valid_sets.append(valid_data._update_params(params).set_reference(train_set))
            if valid_names is not None and len(valid_names) > i:
                name_valid_sets.append(valid_names[i])
            else:
                name_valid_sets.append(f"valid_{i}")
    # process callbacks
    if callbacks is None:
        callbacks_set = set()
    else:
        for i, cb in enumerate(callbacks):
            cb.__dict__.setdefault("order", i - len(callbacks))
        callbacks_set = set(callbacks)

    if callback._should_enable_early_stopping(params.get("early_stopping_round", 0)):
        callbacks_set.add(
            callback.early_stopping(
                stopping_rounds=params["early_stopping_round"],  # type: ignore[arg-type]
                first_metric_only=first_metric_only,
                min_delta=params.get("early_stopping_min_delta", 0.0),
                verbose=_choose_param_value(
                    main_param_name="verbosity",
                    params=params,
                    default_value=1,
                ).pop("verbosity")
                > 0,
            )
        )

    callbacks_before_iter_set = {cb for cb in callbacks_set if getattr(cb, "before_iteration", False)}
    callbacks_after_iter_set = callbacks_set - callbacks_before_iter_set
    callbacks_before_iter = sorted(callbacks_before_iter_set, key=attrgetter("order"))
    callbacks_after_iter = sorted(callbacks_after_iter_set, key=attrgetter("order"))

    # construct booster
    try:
        booster = Booster(params=params, train_set=train_set)
        if is_valid_contain_train:
            booster.set_train_data_name(train_data_name)
        for valid_set, name_valid_set in zip(reduced_valid_sets, name_valid_sets):
            booster.add_valid(valid_set, name_valid_set)
    finally:
        train_set._reverse_update_params()
        for valid_set in reduced_valid_sets:
            valid_set._reverse_update_params()
    booster.best_iteration = 0

    # start training
    for i in range(init_iteration, init_iteration + num_boost_round):
        for cb in callbacks_before_iter:
            cb(
                callback.CallbackEnv(
                    model=booster,
                    params=params,
                    iteration=i,
                    begin_iteration=init_iteration,
                    end_iteration=init_iteration + num_boost_round,
                    evaluation_result_list=None,
                )
            )

        booster.update(fobj=fobj)

        evaluation_result_list: List[_LGBM_BoosterEvalMethodResultType] = []
        # check evaluation result.
        if valid_sets is not None:
            if is_valid_contain_train:
                evaluation_result_list.extend(booster.eval_train(feval))
            evaluation_result_list.extend(booster.eval_valid(feval))
        try:
            for cb in callbacks_after_iter:
                cb(
                    callback.CallbackEnv(
                        model=booster,
                        params=params,
                        iteration=i,
                        begin_iteration=init_iteration,
                        end_iteration=init_iteration + num_boost_round,
                        evaluation_result_list=evaluation_result_list,
                    )
                )
        except callback.EarlyStopException as earlyStopException:
            booster.best_iteration = earlyStopException.best_iteration + 1
            # eval results from cv() have a 5th element with the standard deviation of metrics,
            # which is not needed for early stopping
            evaluation_result_list = [item[:4] for item in earlyStopException.best_score]
            break
    booster.best_score = defaultdict(OrderedDict)
    for dataset_name, eval_name, score, _ in evaluation_result_list:
        booster.best_score[dataset_name][eval_name] = score
    if not keep_training_booster:
        booster.model_from_string(booster.model_to_string()).free_dataset()
    return booster
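
# Illustrative usage sketch (kept as a comment, not executed; ``X``, ``y``, ``squared_loss``
# and ``rmse`` are placeholders, not part of this module). It shows ``train()`` with a custom
# objective passed through ``params`` and a custom metric passed through ``feval``, following
# the docstring above.
#
#     import lightgbm as lgb
#
#     def squared_loss(preds, train_data):
#         # gradient and hessian of 0.5 * (preds - y) ** 2 with respect to preds
#         y = train_data.get_label()
#         return preds - y, np.ones_like(preds)
#
#     def rmse(preds, eval_data):
#         y = eval_data.get_label()
#         return "rmse", float(np.sqrt(np.mean((preds - y) ** 2))), False
#
#     train_data = lgb.Dataset(X, label=y)
#     booster = lgb.train(
#         params={"objective": squared_loss, "metric": "None", "learning_rate": 0.1},
#         train_set=train_data,
#         num_boost_round=100,
#         valid_sets=[train_data],
#         feval=rmse,
#     )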


class CVBooster:
    """CVBooster in LightGBM.

    Auxiliary data structure to hold and redirect all boosters of ``cv()`` function.
    This class has the same methods as Booster class.
    All method calls, except for the following methods, are actually performed for underlying Boosters and
    then all returned results are returned in a list.

    - ``model_from_string()``
    - ``model_to_string()``
    - ``save_model()``

    Attributes
    ----------
    boosters : list of Booster
        The list of underlying fitted models.
    best_iteration : int
        The best iteration of fitted model.
    """

    def __init__(
        self,
        model_file: Optional[Union[str, Path]] = None,
    ):
        """Initialize the CVBooster.

        Parameters
        ----------
        model_file : str, pathlib.Path or None, optional (default=None)
            Path to the CVBooster model file.
        """
        self.boosters: List[Booster] = []
        self.best_iteration = -1

        if model_file is not None:
            with open(model_file, "r") as file:
                self._from_dict(json.load(file))

    def _from_dict(self, models: Dict[str, Any]) -> None:
        """Load CVBooster from dict."""
        self.best_iteration = models["best_iteration"]
        self.boosters = []
        for model_str in models["boosters"]:
            self.boosters.append(Booster(model_str=model_str))

    def _to_dict(
        self,
        *,
        num_iteration: Optional[int],
        start_iteration: int,
        importance_type: str,
    ) -> Dict[str, Any]:
        """Serialize CVBooster to dict."""
        models_str = []
        for booster in self.boosters:
            models_str.append(
                booster.model_to_string(
                    num_iteration=num_iteration, start_iteration=start_iteration, importance_type=importance_type
                )
            )
        return {"boosters": models_str, "best_iteration": self.best_iteration}

    def __getattr__(self, name: str) -> Callable[[Any, Any], List[Any]]:
        """Redirect method calls of CVBooster to its underlying Boosters."""

        def handler_function(*args: Any, **kwargs: Any) -> List[Any]:
            """Call methods with each booster, and concatenate their results."""
            ret = []
            for booster in self.boosters:
                ret.append(getattr(booster, name)(*args, **kwargs))
            return ret

        return handler_function

    def __getstate__(self) -> Dict[str, Any]:
        return vars(self)

    def __setstate__(self, state: Dict[str, Any]) -> None:
        vars(self).update(state)

    def model_from_string(self, model_str: str) -> "CVBooster":
        """Load CVBooster from a string.

        Parameters
        ----------
        model_str : str
            Model will be loaded from this string.

        Returns
        -------
        self : CVBooster
            Loaded CVBooster object.
        """
        self._from_dict(json.loads(model_str))
        return self

    def model_to_string(
        self,
        num_iteration: Optional[int] = None,
        start_iteration: int = 0,
        importance_type: str = "split",
    ) -> str:
        """Save CVBooster to JSON string.

        Parameters
        ----------
        num_iteration : int or None, optional (default=None)
            Index of the iteration that should be saved.
            If None, if the best iteration exists, it is saved; otherwise, all iterations are saved.
            If <= 0, all iterations are saved.
        start_iteration : int, optional (default=0)
            Start index of the iteration that should be saved.
        importance_type : str, optional (default="split")
            What type of feature importance should be saved.
            If "split", result contains numbers of times the feature is used in a model.
            If "gain", result contains total gains of splits which use the feature.

        Returns
        -------
        str_repr : str
            JSON string representation of CVBooster.
        """
        return json.dumps(
            self._to_dict(num_iteration=num_iteration, start_iteration=start_iteration, importance_type=importance_type)
        )

    def save_model(
        self,
        filename: Union[str, Path],
        num_iteration: Optional[int] = None,
        start_iteration: int = 0,
        importance_type: str = "split",
    ) -> "CVBooster":
        """Save CVBooster to a file as JSON text.

        Parameters
        ----------
        filename : str or pathlib.Path
            Filename to save CVBooster.
        num_iteration : int or None, optional (default=None)
            Index of the iteration that should be saved.
            If None, if the best iteration exists, it is saved; otherwise, all iterations are saved.
            If <= 0, all iterations are saved.
        start_iteration : int, optional (default=0)
            Start index of the iteration that should be saved.
        importance_type : str, optional (default="split")
            What type of feature importance should be saved.
            If "split", result contains numbers of times the feature is used in a model.
            If "gain", result contains total gains of splits which use the feature.

        Returns
        -------
        self : CVBooster
            Returns self.
        """
        with open(filename, "w") as file:
            json.dump(
                self._to_dict(
                    num_iteration=num_iteration, start_iteration=start_iteration, importance_type=importance_type
                ),
                file,
            )

        return self
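
# Illustrative round-trip sketch (kept as a comment, not executed): a ``CVBooster`` returned by
# ``cv(..., return_cvbooster=True)`` can be persisted and restored with the methods above.
# ``cv_results``, ``X`` and the file name are placeholders.
#
#     cvbooster = cv_results["cvbooster"]
#     cvbooster.save_model("cv_model.json")             # JSON text containing all fold models
#     restored = CVBooster(model_file="cv_model.json")  # same boosters and best_iteration
#     fold_preds = restored.predict(X)                  # one prediction array per fold, via __getattr__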


def _make_n_folds(
    *,
    full_data: Dataset,
    folds: Optional[Union[Iterable[Tuple[np.ndarray, np.ndarray]], _LGBMBaseCrossValidator]],
    nfold: int,
    params: Dict[str, Any],
    seed: int,
    fpreproc: Optional[_LGBM_PreprocFunction],
    stratified: bool,
    shuffle: bool,
    eval_train_metric: bool,
) -> CVBooster:
    """Make an n-fold list of Boosters from random indices."""
    full_data = full_data.construct()
    num_data = full_data.num_data()
    if folds is not None:
        if not hasattr(folds, "__iter__") and not hasattr(folds, "split"):
            raise AttributeError(
                "folds should be a generator or iterator of (train_idx, test_idx) tuples "
                "or scikit-learn splitter object with split method"
            )
        if hasattr(folds, "split"):
            group_info = full_data.get_group()
            if group_info is not None:
                group_info = np.asarray(group_info, dtype=np.int32)
                flatted_group = np.repeat(range(len(group_info)), repeats=group_info)
            else:
                flatted_group = np.zeros(num_data, dtype=np.int32)
            folds = folds.split(X=np.empty(num_data), y=full_data.get_label(), groups=flatted_group)
    else:
        if any(
            params.get(obj_alias, "")
            in {"lambdarank", "rank_xendcg", "xendcg", "xe_ndcg", "xe_ndcg_mart", "xendcg_mart"}
            for obj_alias in _ConfigAliases.get("objective")
        ):
            if not SKLEARN_INSTALLED:
                raise LightGBMError("scikit-learn is required for ranking cv")
            # ranking task, split according to groups
            group_info = np.asarray(full_data.get_group(), dtype=np.int32)
            flatted_group = np.repeat(range(len(group_info)), repeats=group_info)
            group_kfold = _LGBMGroupKFold(n_splits=nfold)
            folds = group_kfold.split(X=np.empty(num_data), groups=flatted_group)
        elif stratified:
            if not SKLEARN_INSTALLED:
                raise LightGBMError("scikit-learn is required for stratified cv")
            skf = _LGBMStratifiedKFold(n_splits=nfold, shuffle=shuffle, random_state=seed)
            folds = skf.split(X=np.empty(num_data), y=full_data.get_label())
        else:
            if shuffle:
                randidx = np.random.RandomState(seed).permutation(num_data)
            else:
                randidx = np.arange(num_data)
            kstep = int(num_data / nfold)
            test_id = [randidx[i : i + kstep] for i in range(0, num_data, kstep)]
            train_id = [np.concatenate([test_id[i] for i in range(nfold) if k != i]) for k in range(nfold)]
            folds = zip(train_id, test_id)

    ret = CVBooster()
    for train_idx, test_idx in folds:
        train_set = full_data.subset(sorted(train_idx))
        valid_set = full_data.subset(sorted(test_idx))
        # run preprocessing on the data set if needed
        if fpreproc is not None:
            train_set, valid_set, tparam = fpreproc(train_set, valid_set, params.copy())
        else:
            tparam = params
        booster_for_fold = Booster(tparam, train_set)
        if eval_train_metric:
            booster_for_fold.add_valid(train_set, "train")
        booster_for_fold.add_valid(valid_set, "valid")
        ret.boosters.append(booster_for_fold)
    return ret


def _agg_cv_result(
    raw_results: List[List[_LGBM_BoosterEvalMethodResultType]],
) -> List[_LGBM_BoosterEvalMethodResultWithStandardDeviationType]:
    """Aggregate cross-validation results."""
    # build up 2 maps, of the form:
    #
    # OrderedDict{
    #     (<dataset_name>, <metric_name>): <is_higher_better>
    # }
    #
    # OrderedDict{
    #     (<dataset_name>, <metric_name>): list[<metric_value>]
    # }
    #
    metric_types: Dict[Tuple[str, str], bool] = OrderedDict()
    metric_values: Dict[Tuple[str, str], List[float]] = OrderedDict()
    for one_result in raw_results:
        for dataset_name, metric_name, metric_value, is_higher_better in one_result:
            key = (dataset_name, metric_name)
            metric_types[key] = is_higher_better
            metric_values.setdefault(key, [])
            metric_values[key].append(metric_value)

    # turn that into a list of tuples of the form:
    #
    # [
    #     (<dataset_name>, <metric_name>, mean(<values>), <is_higher_better>, std_dev(<values>))
    # ]
    return [(k[0], k[1], float(np.mean(v)), metric_types[k], float(np.std(v))) for k, v in metric_values.items()]
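
# Worked example (illustrative only): if two folds both report an "l2" metric on the "valid"
# set, with values 4.0 and 6.0, the function above returns
# [("valid", "l2", 5.0, False, 1.0)], i.e. the mean, the is_higher_better flag and the
# standard deviation across folds.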


def cv(
    params: Dict[str, Any],
    train_set: Dataset,
    num_boost_round: int = 100,
    folds: Optional[Union[Iterable[Tuple[np.ndarray, np.ndarray]], _LGBMBaseCrossValidator]] = None,
    nfold: int = 5,
    stratified: bool = True,
    shuffle: bool = True,
    metrics: Optional[Union[str, List[str]]] = None,
    feval: Optional[Union[_LGBM_CustomMetricFunction, List[_LGBM_CustomMetricFunction]]] = None,
    init_model: Optional[Union[str, Path, Booster]] = None,
    fpreproc: Optional[_LGBM_PreprocFunction] = None,
    seed: int = 0,
    callbacks: Optional[List[Callable]] = None,
    eval_train_metric: bool = False,
    return_cvbooster: bool = False,
) -> Dict[str, Union[List[float], CVBooster]]:
    """Perform the cross-validation with given parameters.

    Parameters
    ----------
    params : dict
        Parameters for training. Values passed through ``params`` take precedence over those
        supplied via arguments.
    train_set : Dataset
        Data to be trained on.
    num_boost_round : int, optional (default=100)
        Number of boosting iterations.
    folds : generator or iterator of (train_idx, test_idx) tuples, scikit-learn splitter object or None, optional (default=None)
        If generator or iterator, it should yield the train and test indices for each fold.
        If object, it should be one of the scikit-learn splitter classes
        (https://scikit-learn.org/stable/modules/classes.html#splitter-classes)
        and have ``split`` method.
        This argument takes precedence over the other data split arguments.
    nfold : int, optional (default=5)
        Number of folds in CV.
    stratified : bool, optional (default=True)
        Whether to perform stratified sampling.
    shuffle : bool, optional (default=True)
        Whether to shuffle before splitting data.
    metrics : str, list of str, or None, optional (default=None)
        Evaluation metrics to be monitored during CV.
        If not None, the metric in ``params`` will be overridden.
    feval : callable, list of callable, or None, optional (default=None)
        Customized evaluation function.
        Each evaluation function should accept two parameters: preds, eval_data,
        and return (eval_name, eval_result, is_higher_better) or list of such tuples.

            preds : numpy 1-D array or numpy 2-D array (for multi-class task)
                The predicted values.
                For multi-class task, preds are numpy 2-D array of shape = [n_samples, n_classes].
                If custom objective function is used, predicted values are returned before any transformation,
                e.g. they are raw margin instead of probability of positive class for binary task in this case.
            eval_data : Dataset
                A ``Dataset`` to evaluate.
            eval_name : str
                The name of evaluation function (without whitespace).
            eval_result : float
                The eval result.
            is_higher_better : bool
                Is eval result higher better, e.g. AUC is ``is_higher_better``.

        To ignore the default metric corresponding to the used objective,
        set ``metrics`` to the string ``"None"``.
    init_model : str, pathlib.Path, Booster or None, optional (default=None)
        Filename of LightGBM model or Booster instance used to continue training.
    fpreproc : callable or None, optional (default=None)
        Preprocessing function that takes (dtrain, dtest, params)
        and returns transformed versions of those.
    seed : int, optional (default=0)
        Seed used to generate the folds (passed to ``numpy.random.RandomState`` and the scikit-learn splitters).
    callbacks : list of callable, or None, optional (default=None)
        List of callback functions that are applied at each iteration.
        See Callbacks in Python API for more information.
    eval_train_metric : bool, optional (default=False)
        Whether to display the train metric in progress.
        The score of the metric is calculated again after each training step, so there is some impact on performance.
    return_cvbooster : bool, optional (default=False)
        Whether to return Booster models trained on each fold through ``CVBooster``.

    Note
    ----
    A custom objective function can be provided for the ``objective`` parameter.
    It should accept two parameters: preds, train_data and return (grad, hess).

        preds : numpy 1-D array or numpy 2-D array (for multi-class task)
            The predicted values.
            Predicted values are returned before any transformation,
            e.g. they are raw margin instead of probability of positive class for binary task.
        train_data : Dataset
            The training dataset.
        grad : numpy 1-D array or numpy 2-D array (for multi-class task)
            The value of the first order derivative (gradient) of the loss
            with respect to the elements of preds for each sample point.
        hess : numpy 1-D array or numpy 2-D array (for multi-class task)
            The value of the second order derivative (Hessian) of the loss
            with respect to the elements of preds for each sample point.

    For multi-class task, preds are numpy 2-D array of shape = [n_samples, n_classes],
    and grad and hess should be returned in the same format.

    Returns
    -------
    eval_results : dict
        History of evaluation results of each metric.
        The dictionary has the following format:
        {'valid metric1-mean': [values], 'valid metric1-stdv': [values],
        'valid metric2-mean': [values], 'valid metric2-stdv': [values],
        ...}.
        If ``return_cvbooster=True``, also returns trained boosters wrapped in a ``CVBooster`` object via ``cvbooster`` key.
        If ``eval_train_metric=True``, also returns the train metric history.
        In this case, the dictionary has the following format:
        {'train metric1-mean': [values], 'valid metric1-mean': [values],
        'train metric2-mean': [values], 'valid metric2-mean': [values],
        ...}.
    """
    if not isinstance(train_set, Dataset):
        raise TypeError(f"cv() only accepts Dataset object, train_set has type '{type(train_set).__name__}'.")

    params = copy.deepcopy(params)
    params = _choose_param_value(
        main_param_name="objective",
        params=params,
        default_value=None,
    )
    fobj: Optional[_LGBM_CustomObjectiveFunction] = None
    if callable(params["objective"]):
        fobj = params["objective"]
        params["objective"] = "none"

    params = _choose_num_iterations(num_boost_round_kwarg=num_boost_round, params=params)
    num_boost_round = params["num_iterations"]
    if num_boost_round <= 0:
        raise ValueError(f"Number of boosting rounds must be greater than 0. Got {num_boost_round}.")

    # setting early stopping via global params should be possible
    params = _choose_param_value(
        main_param_name="early_stopping_round",
        params=params,
        default_value=None,
    )
    if params["early_stopping_round"] is None:
        params.pop("early_stopping_round")
    first_metric_only = params.get("first_metric_only", False)

    if isinstance(init_model, (str, Path)):
        predictor = _InnerPredictor.from_model_file(
            model_file=init_model,
            pred_parameter=params,
        )
    elif isinstance(init_model, Booster):
        predictor = _InnerPredictor.from_booster(
            booster=init_model,
            pred_parameter=dict(init_model.params, **params),
        )
    else:
        predictor = None

    if metrics is not None:
        for metric_alias in _ConfigAliases.get("metric"):
            params.pop(metric_alias, None)
        params["metric"] = metrics

    train_set._update_params(params)._set_predictor(predictor)

    results = defaultdict(list)
    cvbooster = _make_n_folds(
        full_data=train_set,
        folds=folds,
        nfold=nfold,
        params=params,
        seed=seed,
        fpreproc=fpreproc,
        stratified=stratified,
        shuffle=shuffle,
        eval_train_metric=eval_train_metric,
    )

    # setup callbacks
    if callbacks is None:
        callbacks_set = set()
    else:
        for i, cb in enumerate(callbacks):
            cb.__dict__.setdefault("order", i - len(callbacks))
        callbacks_set = set(callbacks)

    if callback._should_enable_early_stopping(params.get("early_stopping_round", 0)):
        callbacks_set.add(
            callback.early_stopping(
                stopping_rounds=params["early_stopping_round"],  # type: ignore[arg-type]
                first_metric_only=first_metric_only,
                min_delta=params.get("early_stopping_min_delta", 0.0),
                verbose=_choose_param_value(
                    main_param_name="verbosity",
                    params=params,
                    default_value=1,
                ).pop("verbosity")
                > 0,
            )
        )

    callbacks_before_iter_set = {cb for cb in callbacks_set if getattr(cb, "before_iteration", False)}
    callbacks_after_iter_set = callbacks_set - callbacks_before_iter_set
    callbacks_before_iter = sorted(callbacks_before_iter_set, key=attrgetter("order"))
    callbacks_after_iter = sorted(callbacks_after_iter_set, key=attrgetter("order"))

    for i in range(num_boost_round):
        for cb in callbacks_before_iter:
            cb(
                callback.CallbackEnv(
                    model=cvbooster,
                    params=params,
                    iteration=i,
                    begin_iteration=0,
                    end_iteration=num_boost_round,
                    evaluation_result_list=None,
                )
            )
        cvbooster.update(fobj=fobj)  # type: ignore[call-arg]
        res = _agg_cv_result(cvbooster.eval_valid(feval))  # type: ignore[call-arg]
        for dataset_name, metric_name, metric_mean, _, metric_std_dev in res:
            results[f"{dataset_name} {metric_name}-mean"].append(metric_mean)
            results[f"{dataset_name} {metric_name}-stdv"].append(metric_std_dev)
        try:
            for cb in callbacks_after_iter:
                cb(
                    callback.CallbackEnv(
                        model=cvbooster,
                        params=params,
                        iteration=i,
                        begin_iteration=0,
                        end_iteration=num_boost_round,
                        evaluation_result_list=res,
                    )
                )
        except callback.EarlyStopException as earlyStopException:
            cvbooster.best_iteration = earlyStopException.best_iteration + 1
            for bst in cvbooster.boosters:
                bst.best_iteration = cvbooster.best_iteration
            for k in results:
                results[k] = results[k][: cvbooster.best_iteration]
            break

    if return_cvbooster:
        results["cvbooster"] = cvbooster  # type: ignore[assignment]

    return dict(results)
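

# Illustrative usage sketch (kept as a comment, not executed; ``X``, ``y`` and the scikit-learn
# import are placeholders, not part of this module). It shows ``cv()`` with custom folds from a
# scikit-learn splitter and retrieval of the per-fold boosters via ``return_cvbooster=True``.
#
#     import lightgbm as lgb
#     from sklearn.model_selection import KFold
#
#     cv_results = lgb.cv(
#         params={"objective": "regression", "metric": "l2"},
#         train_set=lgb.Dataset(X, label=y),
#         num_boost_round=100,
#         folds=KFold(n_splits=5, shuffle=True, random_state=0),
#         callbacks=[lgb.early_stopping(stopping_rounds=10)],
#         return_cvbooster=True,
#     )
#     print(cv_results["valid l2-mean"][-1], cv_results["valid l2-stdv"][-1])
#     cvbooster = cv_results["cvbooster"]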