# coding: utf-8
"""Scikit-learn wrapper interface for LightGBM."""
import copy
from inspect import signature

import numpy as np

from .basic import Dataset, LightGBMError, _choose_param_value, _ConfigAliases, _log_warning
from .compat import (SKLEARN_INSTALLED, LGBMNotFittedError, _LGBMAssertAllFinite, _LGBMCheckArray,
                     _LGBMCheckClassificationTargets, _LGBMCheckSampleWeight, _LGBMCheckXY, _LGBMClassifierBase,
                     _LGBMComputeSampleWeight, _LGBMLabelEncoder, _LGBMModelBase, _LGBMRegressorBase, dt_DataTable,
                     pd_DataFrame)
from .engine import train


class _ObjectiveFunctionWrapper:
    """Proxy class for objective function."""

    def __init__(self, func):
        """Construct a proxy class.

        This class transforms an objective function into the form with signature ``new_func(preds, dataset)``
        as expected by ``lightgbm.engine.train``.

        Parameters
        ----------
        func : callable
            Expects a callable with signature ``func(y_true, y_pred)`` or ``func(y_true, y_pred, group)``
            and returns (grad, hess):

                y_true : array-like of shape = [n_samples]
                    The target values.
                y_pred : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
                    The predicted values.
                    Predicted values are returned before any transformation,
                    e.g. they are raw margin instead of probability of positive class for binary task.
                group : array-like
                    Group/query data.
                    Only used in the learning-to-rank task.
                    sum(group) = n_samples.
                    For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
                    where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
                grad : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
                    The value of the first order derivative (gradient) of the loss
                    with respect to the elements of y_pred for each sample point.
                hess : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
                    The value of the second order derivative (Hessian) of the loss
                    with respect to the elements of y_pred for each sample point.

        .. note::

            For multi-class task, y_pred is grouped by class_id first, then by row_id.
            If you want to get the i-th row's y_pred for the j-th class, access y_pred[j * num_data + i],
            and you should group grad and hess in the same way.
        """
        self.func = func

    def __call__(self, preds, dataset):
        """Call passed function with appropriate arguments.

        Parameters
        ----------
        preds : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
            The predicted values.
        dataset : Dataset
            The training dataset.

        Returns
        -------
        grad : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
            The value of the first order derivative (gradient) of the loss
            with respect to the elements of preds for each sample point.
        hess : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
            The value of the second order derivative (Hessian) of the loss
            with respect to the elements of preds for each sample point.
        """
        labels = dataset.get_label()
        argc = len(signature(self.func).parameters)
        if argc == 2:
            grad, hess = self.func(labels, preds)
        elif argc == 3:
            grad, hess = self.func(labels, preds, dataset.get_group())
        else:
            raise TypeError(f"Self-defined objective function should have 2 or 3 arguments, got {argc}")
        # apply sample weights supplied via the dataset to the objective
        weight = dataset.get_weight()
        if weight is not None:
            # single-class case: grad and hess align one-to-one with weight
            if len(weight) == len(grad):
                grad = np.multiply(grad, weight)
                hess = np.multiply(hess, weight)
            else:
                num_data = len(weight)
                num_class = len(grad) // num_data
                if num_class * num_data != len(grad):
                    raise ValueError("Length of grad and hess should be equal to num_class * num_data")
                for k in range(num_class):
                    for i in range(num_data):
                        idx = k * num_data + i
                        grad[idx] *= weight[i]
                        hess[idx] *= weight[i]
        return grad, hess
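

# A minimal sketch (illustrative only, not part of LightGBM's public API) of a
# custom objective with the two-argument signature _ObjectiveFunctionWrapper
# expects: plain squared error, with analytic gradient and constant Hessian.
def _example_l2_objective(y_true, y_pred):
    """Example custom objective; could be passed as ``objective=_example_l2_objective``."""
    grad = y_pred - y_true
    hess = np.ones_like(y_pred)
    return grad, hess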

class _EvalFunctionWrapper:
    """Proxy class for evaluation function."""

    def __init__(self, func):
        """Construct a proxy class.

        This class transforms an evaluation function into the form with signature ``new_func(preds, dataset)``
        as expected by ``lightgbm.engine.train``.

        Parameters
        ----------
        func : callable
            Expects a callable with one of the following signatures:
            ``func(y_true, y_pred)``,
            ``func(y_true, y_pred, weight)``
            or ``func(y_true, y_pred, weight, group)``
            and returns (eval_name, eval_result, is_higher_better) or
            list of (eval_name, eval_result, is_higher_better):

                y_true : array-like of shape = [n_samples]
                    The target values.
                y_pred : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
                    The predicted values.
                    In case of custom ``objective``, predicted values are returned before any transformation,
                    e.g. they are raw margin instead of probability of positive class for binary task in this case.
                weight : array-like of shape = [n_samples]
                    The weight of samples.
                group : array-like
                    Group/query data.
                    Only used in the learning-to-rank task.
                    sum(group) = n_samples.
                    For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
                    where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
                eval_name : string
                    The name of evaluation function (without whitespaces).
                eval_result : float
                    The eval result.
                is_higher_better : bool
                    Is eval result higher better, e.g. AUC is ``is_higher_better``.

        .. note::

            For multi-class task, y_pred is grouped by class_id first, then by row_id.
            If you want to get the i-th row's y_pred for the j-th class, access y_pred[j * num_data + i].
        """
        self.func = func

    def __call__(self, preds, dataset):
        """Call passed function with appropriate arguments.

        Parameters
        ----------
        preds : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
            The predicted values.
        dataset : Dataset
            The training dataset.

        Returns
        -------
        eval_name : string
            The name of evaluation function (without whitespaces).
        eval_result : float
            The eval result.
        is_higher_better : bool
            Is eval result higher better, e.g. AUC is ``is_higher_better``.
        """
        labels = dataset.get_label()
        argc = len(signature(self.func).parameters)
        if argc == 2:
            return self.func(labels, preds)
        elif argc == 3:
            return self.func(labels, preds, dataset.get_weight())
        elif argc == 4:
            return self.func(labels, preds, dataset.get_weight(), dataset.get_group())
        else:
            raise TypeError(f"Self-defined eval function should have 2, 3 or 4 arguments, got {argc}")
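

# A minimal sketch (illustrative only) of a custom eval function with a
# signature _EvalFunctionWrapper accepts: RMSE, returned as the tuple
# (eval_name, eval_result, is_higher_better); lower RMSE is better.
def _example_rmse_eval(y_true, y_pred):
    """Example custom metric; could be passed as ``eval_metric=_example_rmse_eval``."""
    return 'example_rmse', float(np.sqrt(np.mean((y_pred - y_true) ** 2))), False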

# documentation templates for LGBMModel methods are shared between the classes in
# this module and those in the ``dask`` module

_lgbmmodel_doc_fit = (
    """
    Build a gradient boosting model from the training set (X, y).

    Parameters
    ----------
    X : {X_shape}
        Input feature matrix.
    y : {y_shape}
        The target values (class labels in classification, real numbers in regression).
    sample_weight : {sample_weight_shape}
        Weights of training data.
    init_score : {init_score_shape}
        Init score of training data.
    group : {group_shape}
        Group/query data.
        Only used in the learning-to-rank task.
        sum(group) = n_samples.
        For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
        where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
    eval_set : list or None, optional (default=None)
        A list of (X, y) tuple pairs to use as validation sets.
    eval_names : list of strings or None, optional (default=None)
        Names of eval_set.
    eval_sample_weight : list of arrays or None, optional (default=None)
        Weights of eval data.
    eval_class_weight : list or None, optional (default=None)
        Class weights of eval data.
    eval_init_score : list of arrays or None, optional (default=None)
        Init score of eval data.
    eval_group : list of arrays or None, optional (default=None)
        Group data of eval data.
    eval_metric : string, callable, list or None, optional (default=None)
        If string, it should be a built-in evaluation metric to use.
        If callable, it should be a custom evaluation metric, see note below for more details.
        If list, it can be a list of built-in metrics, a list of custom evaluation metrics, or a mix of both.
        In either case, the ``metric`` from the model parameters will be evaluated and used as well.
        Default: 'l2' for LGBMRegressor, 'logloss' for LGBMClassifier, 'ndcg' for LGBMRanker.
    early_stopping_rounds : int or None, optional (default=None)
        Activates early stopping. The model will train until the validation score stops improving.
        Validation score needs to improve at least every ``early_stopping_rounds`` round(s)
        to continue training.
        Requires at least one validation dataset and one metric.
        If there's more than one, all of them will be checked, but the training data is ignored anyway.
        To check only the first metric, set the ``first_metric_only`` parameter to ``True``
        in additional parameters ``**kwargs`` of the model constructor.
    verbose : bool or int, optional (default=True)
        Requires at least one evaluation dataset.
        If True, the eval metric on the eval set is printed at each boosting stage.
        If int, the eval metric on the eval set is printed at every ``verbose`` boosting stage.
        The last boosting stage or the boosting stage found by using ``early_stopping_rounds`` is also printed.

        .. rubric:: Example

        With ``verbose`` = 4 and at least one item in ``eval_set``,
        an evaluation metric is printed every 4 (instead of 1) boosting stages.

    feature_name : list of strings or 'auto', optional (default='auto')
        Feature names.
        If 'auto' and data is pandas DataFrame, data columns names are used.
    categorical_feature : list of strings or int, or 'auto', optional (default='auto')
        Categorical features.
        If list of int, interpreted as indices.
        If list of strings, interpreted as feature names (need to specify ``feature_name`` as well).
        If 'auto' and data is pandas DataFrame, pandas unordered categorical columns are used.
        All values in categorical features should be less than int32 max value (2147483647).
        Large values could be memory consuming. Consider using consecutive integers starting from zero.
        All negative values in categorical features will be treated as missing values.
        The output cannot be monotonically constrained with respect to a categorical feature.
    callbacks : list of callback functions or None, optional (default=None)
        List of callback functions that are applied at each iteration.
        See Callbacks in Python API for more information.
    init_model : string, Booster, LGBMModel or None, optional (default=None)
        Filename of LightGBM model, Booster instance or LGBMModel instance used for continue training.

    Returns
    -------
    self : object
        Returns self.
    """
)
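
# A hedged usage sketch for the ``fit`` parameters documented above; the names
# and data are hypothetical, and the call is kept in comments so this module
# stays import-safe:
#
#     model = LGBMClassifier(first_metric_only=True)
#     model.fit(X_train, y_train,
#               eval_set=[(X_valid, y_valid)],
#               eval_metric='auc',
#               early_stopping_rounds=10,
#               verbose=4)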

_lgbmmodel_doc_custom_eval_note = """
    Note
    ----
    Custom eval function expects a callable with following signatures:
    ``func(y_true, y_pred)``, ``func(y_true, y_pred, weight)`` or
    ``func(y_true, y_pred, weight, group)``
    and returns (eval_name, eval_result, is_higher_better) or
    list of (eval_name, eval_result, is_higher_better):

        y_true : array-like of shape = [n_samples]
            The target values.
        y_pred : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
            The predicted values.
            In case of custom ``objective``, predicted values are returned before any transformation,
            e.g. they are raw margin instead of probability of positive class for binary task in this case.
        weight : array-like of shape = [n_samples]
            The weight of samples.
        group : array-like
            Group/query data.
            Only used in the learning-to-rank task.
            sum(group) = n_samples.
            For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
            where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
        eval_name : string
            The name of evaluation function (without whitespaces).
        eval_result : float
            The eval result.
        is_higher_better : bool
            Is eval result higher better, e.g. AUC is ``is_higher_better``.

    For multi-class task, y_pred is grouped by class_id first, then by row_id.
    If you want to get the i-th row's y_pred for the j-th class, access y_pred[j * num_data + i].
"""

_lgbmmodel_doc_predict = (
    """
    {description}

    Parameters
    ----------
    X : {X_shape}
        Input feature matrix.
    raw_score : bool, optional (default=False)
        Whether to predict raw scores.
    start_iteration : int, optional (default=0)
        Start index of the iteration to predict.
        If <= 0, starts from the first iteration.
    num_iteration : int or None, optional (default=None)
        Total number of iterations used in the prediction.
        If None, if the best iteration exists and start_iteration <= 0, the best iteration is used;
        otherwise, all iterations from ``start_iteration`` are used (no limits).
        If <= 0, all iterations from ``start_iteration`` are used (no limits).
    pred_leaf : bool, optional (default=False)
        Whether to predict leaf index.
    pred_contrib : bool, optional (default=False)
        Whether to predict feature contributions.

        .. note::

            If you want to get more explanations for your model's predictions using SHAP values,
            like SHAP interaction values,
            you can install the shap package (https://github.com/slundberg/shap).
            Note that unlike the shap package, with ``pred_contrib`` we return a matrix with an extra
            column, where the last column is the expected value.

    **kwargs
        Other parameters for the prediction.

    Returns
    -------
    {output_name} : {predicted_result_shape}
        The predicted values.
    X_leaves : {X_leaves_shape}
        If ``pred_leaf=True``, the predicted leaf of every tree for each sample.
    X_SHAP_values : {X_SHAP_values_shape}
        If ``pred_contrib=True``, the feature contributions for each sample.
    """
)
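
# A hedged sketch of how the prediction flags documented above interact, for a
# hypothetical binary model with 100 trees and 20 features (comments only):
#
#     pred = model.predict(X)                        # shape = [n_samples]
#     leaves = model.predict(X, pred_leaf=True)      # shape = [n_samples, 100]
#     contrib = model.predict(X, pred_contrib=True)  # shape = [n_samples, 21];
#                                                    # last column is the expected value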


class LGBMModel(_LGBMModelBase):
    """Implementation of the scikit-learn API for LightGBM."""

    def __init__(self, boosting_type='gbdt', num_leaves=31, max_depth=-1,
                 learning_rate=0.1, n_estimators=100,
                 subsample_for_bin=200000, objective=None, class_weight=None,
                 min_split_gain=0., min_child_weight=1e-3, min_child_samples=20,
                 subsample=1., subsample_freq=0, colsample_bytree=1.,
                 reg_alpha=0., reg_lambda=0., random_state=None,
                 n_jobs=-1, silent=True, importance_type='split', **kwargs):
        r"""Construct a gradient boosting model.

        Parameters
        ----------
        boosting_type : string, optional (default='gbdt')
            'gbdt', traditional Gradient Boosting Decision Tree.
            'dart', Dropouts meet Multiple Additive Regression Trees.
            'goss', Gradient-based One-Side Sampling.
            'rf', Random Forest.
        num_leaves : int, optional (default=31)
            Maximum tree leaves for base learners.
        max_depth : int, optional (default=-1)
            Maximum tree depth for base learners, <=0 means no limit.
        learning_rate : float, optional (default=0.1)
            Boosting learning rate.
            You can use ``callbacks`` parameter of ``fit`` method to shrink/adapt learning rate
            in training using ``reset_parameter`` callback.
            Note that this will ignore the ``learning_rate`` argument in training.
        n_estimators : int, optional (default=100)
            Number of boosted trees to fit.
        subsample_for_bin : int, optional (default=200000)
            Number of samples for constructing bins.
        objective : string, callable or None, optional (default=None)
            Specify the learning task and the corresponding learning objective or
            a custom objective function to be used (see note below).
            Default: 'regression' for LGBMRegressor, 'binary' or 'multiclass' for LGBMClassifier, 'lambdarank' for LGBMRanker.
        class_weight : dict, 'balanced' or None, optional (default=None)
            Weights associated with classes in the form ``{class_label: weight}``.
            Use this parameter only for multi-class classification task;
            for binary classification task you may use ``is_unbalance`` or ``scale_pos_weight`` parameters.
            Note that the usage of all these parameters will result in poor estimates of the individual class probabilities.
            You may want to consider performing probability calibration
            (https://scikit-learn.org/stable/modules/calibration.html) of your model.
            The 'balanced' mode uses the values of y to automatically adjust weights
            inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))``.
            If None, all classes are supposed to have weight one.
            Note that these weights will be multiplied with ``sample_weight`` (passed through the ``fit`` method)
            if ``sample_weight`` is specified.
        min_split_gain : float, optional (default=0.)
            Minimum loss reduction required to make a further partition on a leaf node of the tree.
        min_child_weight : float, optional (default=1e-3)
            Minimum sum of instance weight (hessian) needed in a child (leaf).
        min_child_samples : int, optional (default=20)
            Minimum number of data points needed in a child (leaf).
        subsample : float, optional (default=1.)
            Subsample ratio of the training instance.
        subsample_freq : int, optional (default=0)
            Frequency of subsampling; <=0 means subsampling is disabled.
        colsample_bytree : float, optional (default=1.)
            Subsample ratio of columns when constructing each tree.
        reg_alpha : float, optional (default=0.)
            L1 regularization term on weights.
        reg_lambda : float, optional (default=0.)
            L2 regularization term on weights.
        random_state : int, RandomState object or None, optional (default=None)
            Random number seed.
            If int, this number is used to seed the C++ code.
            If RandomState object (numpy), a random integer is picked based on its state to seed the C++ code.
            If None, default seeds in C++ code are used.
        n_jobs : int, optional (default=-1)
            Number of parallel threads.
        silent : bool, optional (default=True)
            Whether to print messages while running boosting.
        importance_type : string, optional (default='split')
            The type of feature importance to be filled into ``feature_importances_``.
            If 'split', result contains numbers of times the feature is used in a model.
            If 'gain', result contains total gains of splits which use the feature.
        **kwargs
            Other parameters for the model.
            Check http://lightgbm.readthedocs.io/en/latest/Parameters.html for more parameters.

            .. warning::

                \*\*kwargs is not supported in sklearn, it may cause unexpected issues.

        Note
        ----
        A custom objective function can be provided for the ``objective`` parameter.
        In this case, it should have the signature
        ``objective(y_true, y_pred) -> grad, hess`` or
        ``objective(y_true, y_pred, group) -> grad, hess``:

Nikita Titov's avatar
Nikita Titov committed
440
            y_true : array-like of shape = [n_samples]
441
                The target values.
Nikita Titov's avatar
Nikita Titov committed
442
            y_pred : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
443
                The predicted values.
444
445
                Predicted values are returned before any transformation,
                e.g. they are raw margin instead of probability of positive class for binary task.
Nikita Titov's avatar
Nikita Titov committed
446
            group : array-like
447
448
449
                Group/query data.
                Only used in the learning-to-rank task.
                sum(group) = n_samples.
450
451
                For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
                where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
Nikita Titov's avatar
Nikita Titov committed
452
            grad : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
453
454
                The value of the first order derivative (gradient) of the loss
                with respect to the elements of y_pred for each sample point.
Nikita Titov's avatar
Nikita Titov committed
455
            hess : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
456
457
                The value of the second order derivative (Hessian) of the loss
                with respect to the elements of y_pred for each sample point.
wxchan's avatar
wxchan committed
458

459
460
461
        For multi-class task, the y_pred is group by class_id first, then group by row_id.
        If you want to get i-th row y_pred in j-th class, the access way is y_pred[j * num_data + i]
        and you should group grad and hess in this way as well.
wxchan's avatar
wxchan committed
462
        """
        if not SKLEARN_INSTALLED:
            raise LightGBMError('scikit-learn is required for lightgbm.sklearn')

        self.boosting_type = boosting_type
        self.objective = objective
        self.num_leaves = num_leaves
        self.max_depth = max_depth
        self.learning_rate = learning_rate
        self.n_estimators = n_estimators
        self.subsample_for_bin = subsample_for_bin
        self.min_split_gain = min_split_gain
        self.min_child_weight = min_child_weight
        self.min_child_samples = min_child_samples
        self.subsample = subsample
        self.subsample_freq = subsample_freq
        self.colsample_bytree = colsample_bytree
        self.reg_alpha = reg_alpha
        self.reg_lambda = reg_lambda
        self.random_state = random_state
        self.n_jobs = n_jobs
        self.silent = silent
        self.importance_type = importance_type
        self._Booster = None
        self._evals_result = None
        self._best_score = None
        self._best_iteration = None
        self._other_params = {}
        self._objective = objective
        self.class_weight = class_weight
        self._class_weight = None
        self._class_map = None
        self._n_features = None
        self._n_features_in = None
        self._classes = None
        self._n_classes = None
        self.set_params(**kwargs)

    def _more_tags(self):
        return {
            'allow_nan': True,
            'X_types': ['2darray', 'sparse', '1dlabels'],
            '_xfail_checks': {
                'check_no_attributes_set_in_init':
                'scikit-learn incorrectly asserts that private attributes '
                'cannot be set in __init__: '
                '(see https://github.com/microsoft/LightGBM/issues/2628)'
            }
        }

    def get_params(self, deep=True):
        """Get parameters for this estimator.

        Parameters
        ----------
        deep : bool, optional (default=True)
            If True, will return the parameters for this estimator and
            contained subobjects that are estimators.

        Returns
        -------
        params : dict
            Parameter names mapped to their values.
        """
        params = super().get_params(deep=deep)
        params.update(self._other_params)
        return params

    def set_params(self, **params):
        """Set the parameters of this estimator.

        Parameters
        ----------
        **params
            Parameter names with their new values.

        Returns
        -------
        self : object
            Returns self.
        """
        for key, value in params.items():
            setattr(self, key, value)
            if hasattr(self, f"_{key}"):
                setattr(self, f"_{key}", value)
            self._other_params[key] = value
        return self
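
    # A hedged sketch of the sklearn-style parameter round-trip the two methods
    # above support (hypothetical values, comments only):
    #
    #     model = LGBMModel(min_child_samples=10)
    #     model.set_params(learning_rate=0.05)
    #     assert model.get_params()['learning_rate'] == 0.05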

    def fit(self, X, y,
            sample_weight=None, init_score=None, group=None,
            eval_set=None, eval_names=None, eval_sample_weight=None,
            eval_class_weight=None, eval_init_score=None, eval_group=None,
            eval_metric=None, early_stopping_rounds=None, verbose=True,
            feature_name='auto', categorical_feature='auto',
            callbacks=None, init_model=None):
        """Docstring is set after definition, using a template."""
        if self._objective is None:
            if isinstance(self, LGBMRegressor):
                self._objective = "regression"
            elif isinstance(self, LGBMClassifier):
                self._objective = "binary"
            elif isinstance(self, LGBMRanker):
                self._objective = "lambdarank"
            else:
                raise ValueError("Unknown LGBMModel type.")
        if callable(self._objective):
            self._fobj = _ObjectiveFunctionWrapper(self._objective)
        else:
            self._fobj = None
        evals_result = {}
        params = self.get_params()
        # user can set verbose with kwargs, it has higher priority
        if not any(verbose_alias in params for verbose_alias in _ConfigAliases.get("verbosity")) and self.silent:
            params['verbose'] = -1
        params.pop('silent', None)
        params.pop('importance_type', None)
        params.pop('n_estimators', None)
        params.pop('class_weight', None)
        if isinstance(params['random_state'], np.random.RandomState):
            params['random_state'] = params['random_state'].randint(np.iinfo(np.int32).max)
        for alias in _ConfigAliases.get('objective'):
            params.pop(alias, None)
        if self._n_classes is not None and self._n_classes > 2:
            for alias in _ConfigAliases.get('num_class'):
                params.pop(alias, None)
            params['num_class'] = self._n_classes
        if hasattr(self, '_eval_at'):
            for alias in _ConfigAliases.get('eval_at'):
                params.pop(alias, None)
            params['eval_at'] = self._eval_at
        params['objective'] = self._objective
        if self._fobj:
            params['objective'] = 'None'  # objective = nullptr for unknown objective

        # Do not modify original args in fit function
        # Refer to https://github.com/microsoft/LightGBM/pull/2619
        eval_metric_list = copy.deepcopy(eval_metric)
        if not isinstance(eval_metric_list, list):
            eval_metric_list = [eval_metric_list]

        # Separate built-in from callable evaluation metrics
        eval_metrics_callable = [_EvalFunctionWrapper(f) for f in eval_metric_list if callable(f)]
        eval_metrics_builtin = [m for m in eval_metric_list if isinstance(m, str)]

        # register default metric for consistency with callable eval_metric case
        original_metric = self._objective if isinstance(self._objective, str) else None
        if original_metric is None:
            # try to deduce from class instance
            if isinstance(self, LGBMRegressor):
                original_metric = "l2"
            elif isinstance(self, LGBMClassifier):
                original_metric = "multi_logloss" if self._n_classes > 2 else "binary_logloss"
            elif isinstance(self, LGBMRanker):
                original_metric = "ndcg"

        # overwrite default metric by explicitly set metric
        params = _choose_param_value("metric", params, original_metric)

        # concatenate metric from params (or default if not provided in params) and eval_metric
        params['metric'] = [params['metric']] if isinstance(params['metric'], (str, type(None))) else params['metric']
        params['metric'] = [e for e in eval_metrics_builtin if e not in params['metric']] + params['metric']
        params['metric'] = [metric for metric in params['metric'] if metric is not None]

        if not isinstance(X, (pd_DataFrame, dt_DataTable)):
            _X, _y = _LGBMCheckXY(X, y, accept_sparse=True, force_all_finite=False, ensure_min_samples=2)
            if sample_weight is not None:
                sample_weight = _LGBMCheckSampleWeight(sample_weight, _X)
        else:
            _X, _y = X, y

        if self._class_weight is None:
            self._class_weight = self.class_weight
        if self._class_weight is not None:
            class_sample_weight = _LGBMComputeSampleWeight(self._class_weight, y)
            if sample_weight is None or len(sample_weight) == 0:
                sample_weight = class_sample_weight
            else:
                sample_weight = np.multiply(sample_weight, class_sample_weight)

        self._n_features = _X.shape[1]
        # copy for consistency
        self._n_features_in = self._n_features

        def _construct_dataset(X, y, sample_weight, init_score, group, params,
                               categorical_feature='auto'):
            return Dataset(X, label=y, weight=sample_weight, group=group,
                           init_score=init_score, params=params,
                           categorical_feature=categorical_feature)

        train_set = _construct_dataset(_X, _y, sample_weight, init_score, group, params,
                                       categorical_feature=categorical_feature)

        valid_sets = []
        if eval_set is not None:

            def _get_meta_data(collection, name, i):
                if collection is None:
                    return None
                elif isinstance(collection, list):
                    return collection[i] if len(collection) > i else None
                elif isinstance(collection, dict):
                    return collection.get(i, None)
                else:
                    raise TypeError(f"{name} should be dict or list")

            if isinstance(eval_set, tuple):
                eval_set = [eval_set]
            for i, valid_data in enumerate(eval_set):
                # reduce the cost of predicting on the training data
                if valid_data[0] is X and valid_data[1] is y:
                    valid_set = train_set
                else:
                    valid_weight = _get_meta_data(eval_sample_weight, 'eval_sample_weight', i)
                    valid_class_weight = _get_meta_data(eval_class_weight, 'eval_class_weight', i)
                    if valid_class_weight is not None:
                        if isinstance(valid_class_weight, dict) and self._class_map is not None:
                            valid_class_weight = {self._class_map[k]: v for k, v in valid_class_weight.items()}
                        valid_class_sample_weight = _LGBMComputeSampleWeight(valid_class_weight, valid_data[1])
                        if valid_weight is None or len(valid_weight) == 0:
                            valid_weight = valid_class_sample_weight
                        else:
                            valid_weight = np.multiply(valid_weight, valid_class_sample_weight)
                    valid_init_score = _get_meta_data(eval_init_score, 'eval_init_score', i)
                    valid_group = _get_meta_data(eval_group, 'eval_group', i)
                    valid_set = _construct_dataset(valid_data[0], valid_data[1],
                                                   valid_weight, valid_init_score, valid_group, params)
                valid_sets.append(valid_set)

        if isinstance(init_model, LGBMModel):
            init_model = init_model.booster_

        self._Booster = train(params, train_set,
                              self.n_estimators, valid_sets=valid_sets, valid_names=eval_names,
                              early_stopping_rounds=early_stopping_rounds,
                              evals_result=evals_result, fobj=self._fobj, feval=eval_metrics_callable,
                              verbose_eval=verbose, feature_name=feature_name,
                              callbacks=callbacks, init_model=init_model)

        if evals_result:
            self._evals_result = evals_result

        if early_stopping_rounds is not None and early_stopping_rounds > 0:
            self._best_iteration = self._Booster.best_iteration

        self._best_score = self._Booster.best_score

        self.fitted_ = True

        # free dataset
        self._Booster.free_dataset()
        del train_set, valid_sets
        return self

    fit.__doc__ = _lgbmmodel_doc_fit.format(
        X_shape="array-like or sparse matrix of shape = [n_samples, n_features]",
        y_shape="array-like of shape = [n_samples]",
        sample_weight_shape="array-like of shape = [n_samples] or None, optional (default=None)",
        init_score_shape="array-like of shape = [n_samples] or None, optional (default=None)",
        group_shape="array-like or None, optional (default=None)"
    ) + "\n\n" + _lgbmmodel_doc_custom_eval_note

    def predict(self, X, raw_score=False, start_iteration=0, num_iteration=None,
                pred_leaf=False, pred_contrib=False, **kwargs):
        """Docstring is set after definition, using a template."""
        if self._n_features is None:
            raise LGBMNotFittedError("Estimator not fitted, call `fit` before exploiting the model.")
        if not isinstance(X, (pd_DataFrame, dt_DataTable)):
            X = _LGBMCheckArray(X, accept_sparse=True, force_all_finite=False)
        n_features = X.shape[1]
        if self._n_features != n_features:
            raise ValueError("Number of features of the model must "
                             f"match the input. Model n_features_ is {self._n_features} and "
                             f"input n_features is {n_features}")
        return self._Booster.predict(X, raw_score=raw_score, start_iteration=start_iteration, num_iteration=num_iteration,
                                     pred_leaf=pred_leaf, pred_contrib=pred_contrib, **kwargs)

    predict.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted value for each sample.",
        X_shape="array-like or sparse matrix of shape = [n_samples, n_features]",
        output_name="predicted_result",
        predicted_result_shape="array-like of shape = [n_samples] or shape = [n_samples, n_classes]",
        X_leaves_shape="array-like of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]",
        X_SHAP_values_shape="array-like of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes] or list with n_classes length of such objects"
    )

    @property
    def n_features_(self):
        """:obj:`int`: The number of features of fitted model."""
        if self._n_features is None:
            raise LGBMNotFittedError('No n_features found. Need to call fit beforehand.')
        return self._n_features

    @property
    def n_features_in_(self):
        """:obj:`int`: The number of features of fitted model."""
        if self._n_features_in is None:
            raise LGBMNotFittedError('No n_features_in found. Need to call fit beforehand.')
        return self._n_features_in

    @property
    def best_score_(self):
        """:obj:`dict` or :obj:`None`: The best score of fitted model."""
        if self._n_features is None:
            raise LGBMNotFittedError('No best_score found. Need to call fit beforehand.')
        return self._best_score

    @property
    def best_iteration_(self):
        """:obj:`int` or :obj:`None`: The best iteration of fitted model if ``early_stopping_rounds`` has been specified."""
        if self._n_features is None:
            raise LGBMNotFittedError('No best_iteration found. Need to call fit with early_stopping_rounds beforehand.')
        return self._best_iteration

    @property
    def objective_(self):
        """:obj:`string` or :obj:`callable`: The concrete objective used while fitting this model."""
        if self._n_features is None:
            raise LGBMNotFittedError('No objective found. Need to call fit beforehand.')
        return self._objective

    @property
    def booster_(self):
        """Booster: The underlying Booster of this model."""
        if self._Booster is None:
            raise LGBMNotFittedError('No booster found. Need to call fit beforehand.')
        return self._Booster

    @property
    def evals_result_(self):
        """:obj:`dict` or :obj:`None`: The evaluation results if ``early_stopping_rounds`` has been specified."""
        if self._n_features is None:
            raise LGBMNotFittedError('No results found. Need to call fit with eval_set beforehand.')
        return self._evals_result

    @property
    def feature_importances_(self):
        """:obj:`array` of shape = [n_features]: The feature importances (the higher, the more important).

        .. note::

            ``importance_type`` attribute is passed to the function
            to configure the type of importance values to be extracted.
        """
        if self._n_features is None:
            raise LGBMNotFittedError('No feature_importances found. Need to call fit beforehand.')
        return self._Booster.feature_importance(importance_type=self.importance_type)

    @property
    def feature_name_(self):
        """:obj:`array` of shape = [n_features]: The names of features."""
        if self._n_features is None:
            raise LGBMNotFittedError('No feature_name found. Need to call fit beforehand.')
        return self._Booster.feature_name()


class LGBMRegressor(_LGBMRegressorBase, LGBMModel):
    """LightGBM regressor."""

    def fit(self, X, y,
            sample_weight=None, init_score=None,
            eval_set=None, eval_names=None, eval_sample_weight=None,
            eval_init_score=None, eval_metric=None, early_stopping_rounds=None,
            verbose=True, feature_name='auto', categorical_feature='auto',
            callbacks=None, init_model=None):
        """Docstring is inherited from the LGBMModel."""
        super().fit(X, y, sample_weight=sample_weight, init_score=init_score,
                    eval_set=eval_set, eval_names=eval_names, eval_sample_weight=eval_sample_weight,
                    eval_init_score=eval_init_score, eval_metric=eval_metric,
                    early_stopping_rounds=early_stopping_rounds, verbose=verbose, feature_name=feature_name,
                    categorical_feature=categorical_feature, callbacks=callbacks, init_model=init_model)
        return self

    _base_doc = LGBMModel.fit.__doc__
    _base_doc = (_base_doc[:_base_doc.find('group :')]  # type: ignore
                 + _base_doc[_base_doc.find('eval_set :'):])  # type: ignore
    _base_doc = (_base_doc[:_base_doc.find('eval_class_weight :')]
                 + _base_doc[_base_doc.find('eval_init_score :'):])
    fit.__doc__ = (_base_doc[:_base_doc.find('eval_group :')]
                   + _base_doc[_base_doc.find('eval_metric :'):])


class LGBMClassifier(_LGBMClassifierBase, LGBMModel):
    """LightGBM classifier."""

    def fit(self, X, y,
            sample_weight=None, init_score=None,
            eval_set=None, eval_names=None, eval_sample_weight=None,
            eval_class_weight=None, eval_init_score=None, eval_metric=None,
            early_stopping_rounds=None, verbose=True,
            feature_name='auto', categorical_feature='auto',
            callbacks=None, init_model=None):
        """Docstring is inherited from the LGBMModel."""
        _LGBMAssertAllFinite(y)
        _LGBMCheckClassificationTargets(y)
        self._le = _LGBMLabelEncoder().fit(y)
        _y = self._le.transform(y)
        self._class_map = dict(zip(self._le.classes_, self._le.transform(self._le.classes_)))
        if isinstance(self.class_weight, dict):
            self._class_weight = {self._class_map[k]: v for k, v in self.class_weight.items()}

        self._classes = self._le.classes_
        self._n_classes = len(self._classes)

        if self._n_classes > 2:
            # Switch to using a multiclass objective in the underlying LGBM instance
            ova_aliases = {"multiclassova", "multiclass_ova", "ova", "ovr"}
            if self._objective not in ova_aliases and not callable(self._objective):
                self._objective = "multiclass"

        # remap binary <-> multiclass metric aliases to match the number of classes observed in y
        if not callable(eval_metric):
            if isinstance(eval_metric, (str, type(None))):
                eval_metric = [eval_metric]
            if self._n_classes > 2:
                for index, metric in enumerate(eval_metric):
                    if metric in {'logloss', 'binary_logloss'}:
                        eval_metric[index] = "multi_logloss"
                    elif metric in {'error', 'binary_error'}:
                        eval_metric[index] = "multi_error"
            else:
                for index, metric in enumerate(eval_metric):
                    if metric in {'logloss', 'multi_logloss'}:
                        eval_metric[index] = 'binary_logloss'
                    elif metric in {'error', 'multi_error'}:
                        eval_metric[index] = 'binary_error'

        # do not modify args, as it causes errors in model selection tools
        valid_sets = None
        if eval_set is not None:
            if isinstance(eval_set, tuple):
                eval_set = [eval_set]
            valid_sets = [None] * len(eval_set)
            for i, (valid_x, valid_y) in enumerate(eval_set):
                if valid_x is X and valid_y is y:
                    valid_sets[i] = (valid_x, _y)
                else:
                    valid_sets[i] = (valid_x, self._le.transform(valid_y))

        super().fit(X, _y, sample_weight=sample_weight, init_score=init_score, eval_set=valid_sets,
                    eval_names=eval_names, eval_sample_weight=eval_sample_weight,
                    eval_class_weight=eval_class_weight, eval_init_score=eval_init_score,
                    eval_metric=eval_metric, early_stopping_rounds=early_stopping_rounds,
                    verbose=verbose, feature_name=feature_name, categorical_feature=categorical_feature,
                    callbacks=callbacks, init_model=init_model)
        return self

    _base_doc = LGBMModel.fit.__doc__
    _base_doc = (_base_doc[:_base_doc.find('group :')]  # type: ignore
                 + _base_doc[_base_doc.find('eval_set :'):])  # type: ignore
    fit.__doc__ = (_base_doc[:_base_doc.find('eval_group :')]
                   + _base_doc[_base_doc.find('eval_metric :'):])

    def predict(self, X, raw_score=False, start_iteration=0, num_iteration=None,
                pred_leaf=False, pred_contrib=False, **kwargs):
        """Docstring is inherited from the LGBMModel."""
        result = self.predict_proba(X, raw_score, start_iteration, num_iteration,
                                    pred_leaf, pred_contrib, **kwargs)
        if callable(self._objective) or raw_score or pred_leaf or pred_contrib:
            return result
        else:
            class_index = np.argmax(result, axis=1)
            return self._le.inverse_transform(class_index)

    predict.__doc__ = LGBMModel.predict.__doc__

    def predict_proba(self, X, raw_score=False, start_iteration=0, num_iteration=None,
                      pred_leaf=False, pred_contrib=False, **kwargs):
        """Docstring is set after definition, using a template."""
        result = super().predict(X, raw_score, start_iteration, num_iteration, pred_leaf, pred_contrib, **kwargs)
        if callable(self._objective) and not (raw_score or pred_leaf or pred_contrib):
            _log_warning("Cannot compute class probabilities or labels "
                         "due to the usage of customized objective function.\n"
                         "Returning raw scores instead.")
            return result
        elif self._n_classes > 2 or raw_score or pred_leaf or pred_contrib:
            return result
        else:
            return np.vstack((1. - result, result)).transpose()

    predict_proba.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted probability for each class for each sample.",
        X_shape="array-like or sparse matrix of shape = [n_samples, n_features]",
        output_name="predicted_probability",
        predicted_result_shape="array-like of shape = [n_samples] or shape = [n_samples, n_classes]",
        X_leaves_shape="array-like of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]",
        X_SHAP_values_shape="array-like of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes] or list with n_classes length of such objects"
    )
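
    # For binary classification with a built-in objective, ``predict_proba``
    # stacks scores into two columns; a hedged illustration of the expected
    # invariant (hypothetical data, comments only):
    #
    #     proba = clf.predict_proba(X)   # shape = [n_samples, 2]
    #     assert np.allclose(proba.sum(axis=1), 1.0)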

    @property
    def classes_(self):
        """:obj:`array` of shape = [n_classes]: The class label array."""
        if self._classes is None:
            raise LGBMNotFittedError('No classes found. Need to call fit beforehand.')
        return self._classes

    @property
    def n_classes_(self):
        """:obj:`int`: The number of classes."""
        if self._n_classes is None:
            raise LGBMNotFittedError('No classes found. Need to call fit beforehand.')
        return self._n_classes


class LGBMRanker(LGBMModel):
    """LightGBM ranker.

    .. warning::

        scikit-learn doesn't support ranking applications yet,
        therefore this class is not really compatible with the sklearn ecosystem.
        Please use this class mainly for training and applying ranking models in the common sklearn-like way.
    """

    def fit(self, X, y,
            sample_weight=None, init_score=None, group=None,
            eval_set=None, eval_names=None, eval_sample_weight=None,
            eval_init_score=None, eval_group=None, eval_metric=None,
            eval_at=(1, 2, 3, 4, 5), early_stopping_rounds=None, verbose=True,
            feature_name='auto', categorical_feature='auto',
            callbacks=None, init_model=None):
        """Docstring is inherited from the LGBMModel."""
        # check group data
        if group is None:
            raise ValueError("Should set group for ranking task")

        if eval_set is not None:
            if eval_group is None:
                raise ValueError("Eval_group cannot be None when eval_set is not None")
            elif len(eval_group) != len(eval_set):
                raise ValueError("Length of eval_group should be equal to length of eval_set")
            elif (isinstance(eval_group, dict)
                  and any(i not in eval_group or eval_group[i] is None for i in range(len(eval_group)))
                  or isinstance(eval_group, list)
                  and any(group is None for group in eval_group)):
                raise ValueError("Should set group for all eval datasets for ranking task; "
                                 "if you use dict, the index should start from 0")

        self._eval_at = eval_at
        super().fit(X, y, sample_weight=sample_weight, init_score=init_score, group=group,
                    eval_set=eval_set, eval_names=eval_names, eval_sample_weight=eval_sample_weight,
                    eval_init_score=eval_init_score, eval_group=eval_group, eval_metric=eval_metric,
                    early_stopping_rounds=early_stopping_rounds, verbose=verbose, feature_name=feature_name,
                    categorical_feature=categorical_feature, callbacks=callbacks, init_model=init_model)
        return self

    _base_doc = LGBMModel.fit.__doc__
    fit.__doc__ = (_base_doc[:_base_doc.find('eval_class_weight :')]  # type: ignore
                   + _base_doc[_base_doc.find('eval_init_score :'):])  # type: ignore
    _base_doc = fit.__doc__
    _before_early_stop, _early_stop, _after_early_stop = _base_doc.partition('early_stopping_rounds :')
    fit.__doc__ = (f"{_before_early_stop}"
                   "eval_at : iterable of int, optional (default=(1, 2, 3, 4, 5))\n"
                   f"{' ':12}The evaluation positions of the specified metric.\n"
                   f"{' ':8}{_early_stop}{_after_early_stop}")
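

# A hedged usage sketch for LGBMRanker's ``group`` parameter (hypothetical
# data: 100 documents split into 6 query groups, mirroring the docstring
# example; comments only):
#
#     ranker = LGBMRanker()
#     ranker.fit(X_train, relevance_labels,
#                group=[10, 20, 40, 10, 10, 10],
#                eval_set=[(X_valid, y_valid)],
#                eval_group=[valid_group],
#                eval_at=(1, 3, 5))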