# coding: utf-8
"""Scikit-learn wrapper interface for LightGBM."""
import copy
from inspect import signature
from typing import Callable, Dict, List, Optional, Tuple, Union

import numpy as np

from .basic import Dataset, LightGBMError, _choose_param_value, _ConfigAliases, _log_warning
from .callback import log_evaluation, record_evaluation
from .compat import (SKLEARN_INSTALLED, LGBMNotFittedError, _LGBMAssertAllFinite, _LGBMCheckArray,
                     _LGBMCheckClassificationTargets, _LGBMCheckSampleWeight, _LGBMCheckXY, _LGBMClassifierBase,
                     _LGBMComputeSampleWeight, _LGBMLabelEncoder, _LGBMModelBase, _LGBMRegressorBase, dt_DataTable,
                     pd_DataFrame, pd_Series)
from .engine import train

_ArrayLike = Union[List, np.ndarray, pd_Series]
_EvalResultType = Tuple[str, float, bool]

_LGBM_ScikitCustomObjectiveFunction = Union[
    Callable[
        [np.ndarray, np.ndarray],
        Tuple[_ArrayLike, _ArrayLike]
    ],
    Callable[
        [np.ndarray, np.ndarray, np.ndarray],
        Tuple[_ArrayLike, _ArrayLike]
    ],
]
_LGBM_ScikitCustomEvalFunction = Union[
    Callable[
        [np.ndarray, np.ndarray],
        Union[_EvalResultType, List[_EvalResultType]]
    ],
    Callable[
        [np.ndarray, np.ndarray, np.ndarray],
        Union[_EvalResultType, List[_EvalResultType]]
    ],
    Callable[
        [np.ndarray, np.ndarray, np.ndarray, np.ndarray],
        Union[_EvalResultType, List[_EvalResultType]]
    ],
]

class _ObjectiveFunctionWrapper:
    """Proxy class for objective function."""

    def __init__(self, func: _LGBM_ScikitCustomObjectiveFunction):
        """Construct a proxy class.

        This class transforms an objective function into one with the signature ``new_func(preds, dataset)``
        as expected by ``lightgbm.engine.train``.

        Parameters
        ----------
        func : callable
            Expects a callable with signature ``func(y_true, y_pred)`` or ``func(y_true, y_pred, group)``
            and returns (grad, hess):

                y_true : array-like of shape = [n_samples]
                    The target values.
                y_pred : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
                    The predicted values.
                    Predicted values are returned before any transformation,
                    e.g. they are raw margin instead of probability of positive class for binary task.
                group : array-like
                    Group/query data.
                    Only used in the learning-to-rank task.
                    sum(group) = n_samples.
                    For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
                    where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
                grad : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
                    The value of the first order derivative (gradient) of the loss
                    with respect to the elements of y_pred for each sample point.
                hess : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
                    The value of the second order derivative (Hessian) of the loss
                    with respect to the elements of y_pred for each sample point.
        .. note::

            For multi-class task, the y_pred is grouped by class_id first, then grouped by row_id.
            If you want to get i-th row y_pred in j-th class, the access way is y_pred[j * num_data + i]
            and you should group grad and hess in this way as well.
        """
        self.func = func
    def __call__(self, preds, dataset):
        """Call passed function with appropriate arguments.

        Parameters
        ----------
        preds : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
            The predicted values.
        dataset : Dataset
            The training dataset.

        Returns
        -------
        grad : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
            The value of the first order derivative (gradient) of the loss
            with respect to the elements of preds for each sample point.
        hess : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
            The value of the second order derivative (Hessian) of the loss
            with respect to the elements of preds for each sample point.
        """
        labels = dataset.get_label()
        argc = len(signature(self.func).parameters)
        if argc == 2:
            grad, hess = self.func(labels, preds)
        elif argc == 3:
            grad, hess = self.func(labels, preds, dataset.get_group())
        else:
            raise TypeError(f"Self-defined objective function should have 2 or 3 arguments, got {argc}")
        # apply sample weights from the Dataset, if any, to the objective's outputs
        weight = dataset.get_weight()
        if weight is not None:
            # single-output case: one grad/hess entry per sample
            if len(weight) == len(grad):
                grad = np.multiply(grad, weight)
                hess = np.multiply(hess, weight)
            else:
                num_data = len(weight)
                num_class = len(grad) // num_data
                if num_class * num_data != len(grad):
                    raise ValueError("Length of grad and hess should be equal to num_class * num_data")
                # multi-class case: scale each sample's entry in every class
                for k in range(num_class):
                    for i in range(num_data):
                        idx = k * num_data + i
                        grad[idx] *= weight[i]
                        hess[idx] *= weight[i]
        return grad, hess

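# Illustrative sketch (not part of this module; ``mse_objective`` is a hypothetical
# name): a custom objective with the two-argument signature documented above. For
# squared error 0.5 * (y_pred - y_true) ** 2, grad is the first derivative and hess
# the (constant) second one; the wrapper then applies any sample weights and
# forwards (grad, hess) to the underlying Booster.
#
#     def mse_objective(y_true, y_pred):
#         grad = y_pred - y_true
#         hess = np.ones_like(y_true)
#         return grad, hess
#
#     LGBMRegressor(objective=mse_objective)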
class _EvalFunctionWrapper:
    """Proxy class for evaluation function."""

    def __init__(self, func: _LGBM_ScikitCustomEvalFunction):
        """Construct a proxy class.

        This class transforms an evaluation function into one with the signature ``new_func(preds, dataset)``
        as expected by ``lightgbm.engine.train``.

        Parameters
        ----------
        func : callable
            Expects a callable with one of the following signatures:
            ``func(y_true, y_pred)``,
            ``func(y_true, y_pred, weight)``
            or ``func(y_true, y_pred, weight, group)``
            and returns (eval_name, eval_result, is_higher_better) or
            list of (eval_name, eval_result, is_higher_better):

                y_true : array-like of shape = [n_samples]
                    The target values.
                y_pred : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
                    The predicted values.
                    In case of custom ``objective``, predicted values are returned before any transformation,
                    e.g. they are raw margin instead of probability of positive class for binary task in this case.
                weight : array-like of shape = [n_samples]
                    The weight of samples.
                group : array-like
                    Group/query data.
                    Only used in the learning-to-rank task.
                    sum(group) = n_samples.
                    For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
                    where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
                eval_name : str
                    The name of the evaluation function (without whitespace).
                eval_result : float
                    The eval result.
                is_higher_better : bool
                    Whether a higher eval result is better, e.g. AUC is ``is_higher_better``.

        .. note::

            For multi-class task, the y_pred is grouped by class_id first, then grouped by row_id.
            If you want to get i-th row y_pred in j-th class, the access way is y_pred[j * num_data + i].
        """
        self.func = func
    def __call__(self, preds, dataset):
        """Call passed function with appropriate arguments.
        Parameters
        ----------
        preds : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
            The predicted values.
        dataset : Dataset
            The training dataset.

        Returns
        -------
        eval_name : str
            The name of the evaluation function (without whitespace).
        eval_result : float
            The eval result.
        is_higher_better : bool
            Whether a higher eval result is better, e.g. AUC is ``is_higher_better``.
        """
        labels = dataset.get_label()
        argc = len(signature(self.func).parameters)
        if argc == 2:
            return self.func(labels, preds)
        elif argc == 3:
            return self.func(labels, preds, dataset.get_weight())
        elif argc == 4:
            return self.func(labels, preds, dataset.get_weight(), dataset.get_group())
        else:
            raise TypeError(f"Self-defined eval function should have 2, 3 or 4 arguments, got {argc}")

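# Illustrative sketch (not part of this module; ``rmse_metric`` and the X/y arrays
# are hypothetical): a custom eval metric with the two-argument signature documented
# above, returning the (eval_name, eval_result, is_higher_better) tuple this wrapper
# forwards to training.
#
#     def rmse_metric(y_true, y_pred):
#         return 'my_rmse', float(np.sqrt(np.mean((y_pred - y_true) ** 2))), False
#
#     LGBMRegressor().fit(X, y, eval_set=[(X_val, y_val)], eval_metric=rmse_metric)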
# documentation templates for LGBMModel methods are shared between the classes in
# this module and those in the ``dask`` module

_lgbmmodel_doc_fit = (
    """
    Build a gradient boosting model from the training set (X, y).

    Parameters
    ----------
    X : {X_shape}
        Input feature matrix.
    y : {y_shape}
        The target values (class labels in classification, real numbers in regression).
    sample_weight : {sample_weight_shape}
        Weights of training data.
    init_score : {init_score_shape}
        Init score of training data.
    group : {group_shape}
        Group/query data.
        Only used in the learning-to-rank task.
        sum(group) = n_samples.
        For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
        where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
    eval_set : list or None, optional (default=None)
        A list of (X, y) tuple pairs to use as validation sets.
    eval_names : list of str, or None, optional (default=None)
        Names of eval_set.
    eval_sample_weight : {eval_sample_weight_shape}
        Weights of eval data.
    eval_class_weight : list or None, optional (default=None)
        Class weights of eval data.
    eval_init_score : {eval_init_score_shape}
        Init score of eval data.
    eval_group : {eval_group_shape}
        Group data of eval data.
    eval_metric : str, callable, list or None, optional (default=None)
        If str, it should be a built-in evaluation metric to use.
        If callable, it should be a custom evaluation metric, see note below for more details.
        If list, it can be a list of built-in metrics, a list of custom evaluation metrics, or a mix of both.
        In either case, the ``metric`` from the model parameters will be evaluated and used as well.
        Default: 'l2' for LGBMRegressor, 'logloss' for LGBMClassifier, 'ndcg' for LGBMRanker.
    early_stopping_rounds : int or None, optional (default=None)
        Activates early stopping. The model will train until the validation score stops improving.
        Validation score needs to improve at least every ``early_stopping_rounds`` round(s)
        to continue training.
        Requires at least one validation data and one metric.
        If there's more than one, will check all of them. But the training data is ignored anyway.
        To check only the first metric, set the ``first_metric_only`` parameter to ``True``
        in additional parameters ``**kwargs`` of the model constructor.
    verbose : bool or int, optional (default=True)
        Requires at least one evaluation data.
        If True, the eval metric on the eval set is printed at each boosting stage.
        If int, the eval metric on the eval set is printed at every ``verbose`` boosting stage.
        The last boosting stage or the boosting stage found by using ``early_stopping_rounds`` is also printed.

        .. rubric:: Example

        With ``verbose`` = 4 and at least one item in ``eval_set``,
        an evaluation metric is printed every 4 (instead of 1) boosting stages.

    feature_name : list of str, or 'auto', optional (default='auto')
        Feature names.
        If 'auto' and data is pandas DataFrame, data column names are used.
    categorical_feature : list of str or int, or 'auto', optional (default='auto')
        Categorical features.
        If list of int, interpreted as indices.
        If list of str, interpreted as feature names (need to specify ``feature_name`` as well).
        If 'auto' and data is pandas DataFrame, pandas unordered categorical columns are used.
        All values in categorical features should be less than int32 max value (2147483647).
        Large values could be memory consuming. Consider using consecutive integers starting from zero.
        All negative values in categorical features will be treated as missing values.
        The output cannot be monotonically constrained with respect to a categorical feature.
    callbacks : list of callable, or None, optional (default=None)
        List of callback functions that are applied at each iteration.
        See Callbacks in Python API for more information.
    init_model : str, pathlib.Path, Booster, LGBMModel or None, optional (default=None)
        Filename of LightGBM model, Booster instance or LGBMModel instance used to continue training.

    Returns
    -------
    self : LGBMModel
        Returns self.
    """
)

_lgbmmodel_doc_custom_eval_note = """
    Note
    ----
    Custom eval function expects a callable with one of the following signatures:
    ``func(y_true, y_pred)``, ``func(y_true, y_pred, weight)`` or
    ``func(y_true, y_pred, weight, group)``
    and returns (eval_name, eval_result, is_higher_better) or
    list of (eval_name, eval_result, is_higher_better):

        y_true : array-like of shape = [n_samples]
            The target values.
        y_pred : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
            The predicted values.
            In case of custom ``objective``, predicted values are returned before any transformation,
            e.g. they are raw margin instead of probability of positive class for binary task in this case.
        weight : array-like of shape = [n_samples]
            The weight of samples.
        group : array-like
            Group/query data.
            Only used in the learning-to-rank task.
            sum(group) = n_samples.
            For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
            where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
        eval_name : str
            The name of the evaluation function (without whitespace).
        eval_result : float
            The eval result.
        is_higher_better : bool
            Whether a higher eval result is better, e.g. AUC is ``is_higher_better``.

    For multi-class task, the y_pred is grouped by class_id first, then grouped by row_id.
    If you want to get i-th row y_pred in j-th class, the access way is y_pred[j * num_data + i].
"""

_lgbmmodel_doc_predict = (
    """
    {description}

    Parameters
    ----------
    X : {X_shape}
        Input feature matrix.
    raw_score : bool, optional (default=False)
        Whether to predict raw scores.
    start_iteration : int, optional (default=0)
        Start index of the iteration to predict.
        If <= 0, starts from the first iteration.
    num_iteration : int or None, optional (default=None)
        Total number of iterations used in the prediction.
        If None, if the best iteration exists and start_iteration <= 0, the best iteration is used;
        otherwise, all iterations from ``start_iteration`` are used (no limits).
        If <= 0, all iterations from ``start_iteration`` are used (no limits).
    pred_leaf : bool, optional (default=False)
        Whether to predict leaf index.
    pred_contrib : bool, optional (default=False)
        Whether to predict feature contributions.

        .. note::

            If you want to get more explanations for your model's predictions using SHAP values,
            like SHAP interaction values,
            you can install the shap package (https://github.com/slundberg/shap).
            Note that unlike the shap package, with ``pred_contrib`` we return a matrix with an extra
            column, where the last column is the expected value.

    **kwargs
        Other parameters for the prediction.

    Returns
    -------
    {output_name} : {predicted_result_shape}
        The predicted values.
    X_leaves : {X_leaves_shape}
        If ``pred_leaf=True``, the predicted leaf of every tree for each sample.
    X_SHAP_values : {X_SHAP_values_shape}
        If ``pred_contrib=True``, the feature contributions for each sample.
    """
)


class LGBMModel(_LGBMModelBase):
    """Implementation of the scikit-learn API for LightGBM."""

    def __init__(
        self,
        boosting_type: str = 'gbdt',
        num_leaves: int = 31,
        max_depth: int = -1,
        learning_rate: float = 0.1,
        n_estimators: int = 100,
        subsample_for_bin: int = 200000,
        objective: Optional[Union[str, _LGBM_ScikitCustomObjectiveFunction]] = None,
        class_weight: Optional[Union[Dict, str]] = None,
        min_split_gain: float = 0.,
        min_child_weight: float = 1e-3,
        min_child_samples: int = 20,
        subsample: float = 1.,
        subsample_freq: int = 0,
        colsample_bytree: float = 1.,
        reg_alpha: float = 0.,
        reg_lambda: float = 0.,
        random_state: Optional[Union[int, np.random.RandomState]] = None,
        n_jobs: int = -1,
        silent: Union[bool, str] = 'warn',
        importance_type: str = 'split',
        **kwargs
    ):
        r"""Construct a gradient boosting model.

        Parameters
        ----------
        boosting_type : str, optional (default='gbdt')
            'gbdt', traditional Gradient Boosting Decision Tree.
            'dart', Dropouts meet Multiple Additive Regression Trees.
            'goss', Gradient-based One-Side Sampling.
            'rf', Random Forest.
        num_leaves : int, optional (default=31)
            Maximum tree leaves for base learners.
        max_depth : int, optional (default=-1)
            Maximum tree depth for base learners, <=0 means no limit.
        learning_rate : float, optional (default=0.1)
            Boosting learning rate.
            You can use ``callbacks`` parameter of ``fit`` method to shrink/adapt learning rate
            in training using ``reset_parameter`` callback.
            Note that this will ignore the ``learning_rate`` argument in training.
        n_estimators : int, optional (default=100)
            Number of boosted trees to fit.
        subsample_for_bin : int, optional (default=200000)
            Number of samples for constructing bins.
        objective : str, callable or None, optional (default=None)
            Specify the learning task and the corresponding learning objective or
            a custom objective function to be used (see note below).
            Default: 'regression' for LGBMRegressor, 'binary' or 'multiclass' for LGBMClassifier, 'lambdarank' for LGBMRanker.
        class_weight : dict, 'balanced' or None, optional (default=None)
            Weights associated with classes in the form ``{class_label: weight}``.
            Use this parameter only for multi-class classification task;
            for binary classification task you may use ``is_unbalance`` or ``scale_pos_weight`` parameters.
            Note that the usage of all these parameters will result in poor estimates of the individual class probabilities.
            You may want to consider performing probability calibration
            (https://scikit-learn.org/stable/modules/calibration.html) of your model.
            The 'balanced' mode uses the values of y to automatically adjust weights
            inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))``.
            If None, all classes are supposed to have weight one.
            Note that these weights will be multiplied with ``sample_weight`` (passed through the ``fit`` method)
            if ``sample_weight`` is specified.
        min_split_gain : float, optional (default=0.)
            Minimum loss reduction required to make a further partition on a leaf node of the tree.
        min_child_weight : float, optional (default=1e-3)
            Minimum sum of instance weight (Hessian) needed in a child (leaf).
        min_child_samples : int, optional (default=20)
            Minimum number of data needed in a child (leaf).
        subsample : float, optional (default=1.)
            Subsample ratio of the training instance.
        subsample_freq : int, optional (default=0)
            Frequency of subsampling; <=0 means subsampling is disabled.
        colsample_bytree : float, optional (default=1.)
            Subsample ratio of columns when constructing each tree.
        reg_alpha : float, optional (default=0.)
            L1 regularization term on weights.
        reg_lambda : float, optional (default=0.)
            L2 regularization term on weights.
        random_state : int, RandomState object or None, optional (default=None)
            Random number seed.
            If int, this number is used to seed the C++ code.
            If RandomState object (numpy), a random integer is picked based on its state to seed the C++ code.
            If None, default seeds in C++ code are used.
        n_jobs : int, optional (default=-1)
            Number of parallel threads to use for training (can be changed at prediction time).
        silent : bool, optional (default=True)
            Whether to print messages while running boosting. Deprecated; pass ``verbose``
            via keyword arguments instead.
        importance_type : str, optional (default='split')
            The type of feature importance to be filled into ``feature_importances_``.
            If 'split', result contains numbers of times the feature is used in a model.
            If 'gain', result contains total gains of splits which use the feature.
        **kwargs
            Other parameters for the model.
            Check http://lightgbm.readthedocs.io/en/latest/Parameters.html for more parameters.

            .. warning::

                \*\*kwargs is not supported in sklearn, it may cause unexpected issues.

        Note
        ----
        A custom objective function can be provided for the ``objective`` parameter.
        In this case, it should have the signature
        ``objective(y_true, y_pred) -> grad, hess`` or
        ``objective(y_true, y_pred, group) -> grad, hess``:

            y_true : array-like of shape = [n_samples]
                The target values.
            y_pred : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
                The predicted values.
                Predicted values are returned before any transformation,
                e.g. they are raw margin instead of probability of positive class for binary task.
            group : array-like
                Group/query data.
                Only used in the learning-to-rank task.
                sum(group) = n_samples.
                For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
                where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
            grad : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
                The value of the first order derivative (gradient) of the loss
                with respect to the elements of y_pred for each sample point.
            hess : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
                The value of the second order derivative (Hessian) of the loss
                with respect to the elements of y_pred for each sample point.

        For multi-class task, the y_pred is grouped by class_id first, then grouped by row_id.
        If you want to get i-th row y_pred in j-th class, the access way is y_pred[j * num_data + i]
        and you should group grad and hess in this way as well.
        """
        if not SKLEARN_INSTALLED:
            raise LightGBMError('scikit-learn is required for lightgbm.sklearn. '
                                'You must install scikit-learn and restart your session to use this module.')

        self.boosting_type = boosting_type
        self.objective = objective
        self.num_leaves = num_leaves
        self.max_depth = max_depth
        self.learning_rate = learning_rate
        self.n_estimators = n_estimators
        self.subsample_for_bin = subsample_for_bin
        self.min_split_gain = min_split_gain
        self.min_child_weight = min_child_weight
        self.min_child_samples = min_child_samples
        self.subsample = subsample
        self.subsample_freq = subsample_freq
        self.colsample_bytree = colsample_bytree
        self.reg_alpha = reg_alpha
        self.reg_lambda = reg_lambda
        self.random_state = random_state
        self.n_jobs = n_jobs
        self.silent = silent
        self.importance_type = importance_type
        self._Booster = None
        self._evals_result = None
        self._best_score = None
        self._best_iteration = None
        self._other_params = {}
        self._objective = objective
        self.class_weight = class_weight
        self._class_weight = None
        self._class_map = None
        self._n_features = None
        self._n_features_in = None
        self._classes = None
        self._n_classes = None
        self.set_params(**kwargs)

    def _more_tags(self):
        return {
            'allow_nan': True,
            'X_types': ['2darray', 'sparse', '1dlabels'],
            '_xfail_checks': {
                'check_no_attributes_set_in_init':
                'scikit-learn incorrectly asserts that private attributes '
                'cannot be set in __init__: '
                '(see https://github.com/microsoft/LightGBM/issues/2628)'
            }
        }
    def __sklearn_is_fitted__(self) -> bool:
        return getattr(self, "fitted_", False)

    def get_params(self, deep=True):
        """Get parameters for this estimator.

        Parameters
        ----------
        deep : bool, optional (default=True)
            If True, will return the parameters for this estimator and
            contained subobjects that are estimators.

        Returns
        -------
        params : dict
            Parameter names mapped to their values.
        """
        params = super().get_params(deep=deep)
        params.update(self._other_params)
        return params

    def set_params(self, **params):
        """Set the parameters of this estimator.

        Parameters
        ----------
        **params
            Parameter names with their new values.

        Returns
        -------
        self : object
            Returns self.
        """
        for key, value in params.items():
            setattr(self, key, value)
            if hasattr(self, f"_{key}"):
                setattr(self, f"_{key}", value)
            self._other_params[key] = value
        return self
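
    # Illustrative sketch (not part of this module; ``min_data_in_bin`` is just an
    # example LightGBM parameter): keyword arguments unknown to the constructor are
    # kept in ``self._other_params`` by set_params() and merged back by get_params().
    #
    #     model = LGBMRegressor(min_data_in_bin=5)
    #     model.get_params()['min_data_in_bin']  # -> 5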

    def fit(self, X, y,
            sample_weight=None, init_score=None, group=None,
            eval_set=None, eval_names=None, eval_sample_weight=None,
            eval_class_weight=None, eval_init_score=None, eval_group=None,
            eval_metric=None, early_stopping_rounds=None, verbose='warn',
            feature_name='auto', categorical_feature='auto',
            callbacks=None, init_model=None):
        """Docstring is set after definition, using a template."""
        params = self.get_params()

        params.pop('objective', None)
        for alias in _ConfigAliases.get('objective'):
            if alias in params:
                self._objective = params.pop(alias)
                _log_warning(f"Found '{alias}' in params. Will use it instead of 'objective' argument")
        if self._objective is None:
            if isinstance(self, LGBMRegressor):
                self._objective = "regression"
            elif isinstance(self, LGBMClassifier):
                if self._n_classes > 2:
                    self._objective = "multiclass"
                else:
                    self._objective = "binary"
            elif isinstance(self, LGBMRanker):
                self._objective = "lambdarank"
            else:
                raise ValueError("Unknown LGBMModel type.")
        if callable(self._objective):
            self._fobj = _ObjectiveFunctionWrapper(self._objective)
            params['objective'] = 'None'  # objective = nullptr for unknown objective
        else:
            self._fobj = None
            params['objective'] = self._objective

        # user can set verbose with kwargs, it has higher priority
        if self.silent != "warn":
            _log_warning("'silent' argument is deprecated and will be removed in a future release of LightGBM. "
                         "Pass 'verbose' parameter via keyword arguments instead.")
            silent = self.silent
        else:
            silent = True
        if not any(verbose_alias in params for verbose_alias in _ConfigAliases.get("verbosity")) and silent:
            params['verbose'] = -1
        params.pop('silent', None)
        params.pop('importance_type', None)
        params.pop('n_estimators', None)
        params.pop('class_weight', None)

        if isinstance(params['random_state'], np.random.RandomState):
            params['random_state'] = params['random_state'].randint(np.iinfo(np.int32).max)
        if self._n_classes is not None and self._n_classes > 2:
            for alias in _ConfigAliases.get('num_class'):
                params.pop(alias, None)
            params['num_class'] = self._n_classes
        if hasattr(self, '_eval_at'):
            eval_at = self._eval_at
            for alias in _ConfigAliases.get('eval_at'):
                if alias in params:
                    _log_warning(f"Found '{alias}' in params. Will use it instead of 'eval_at' argument")
                    eval_at = params.pop(alias)
            params['eval_at'] = eval_at

        # Do not modify original args in fit function
        # Refer to https://github.com/microsoft/LightGBM/pull/2619
        eval_metric_list = copy.deepcopy(eval_metric)
        if not isinstance(eval_metric_list, list):
            eval_metric_list = [eval_metric_list]

        # Separate built-in from callable evaluation metrics
        eval_metrics_callable = [_EvalFunctionWrapper(f) for f in eval_metric_list if callable(f)]
        eval_metrics_builtin = [m for m in eval_metric_list if isinstance(m, str)]

        # register default metric for consistency with callable eval_metric case
        original_metric = self._objective if isinstance(self._objective, str) else None
        if original_metric is None:
            # try to deduce from class instance
            if isinstance(self, LGBMRegressor):
                original_metric = "l2"
            elif isinstance(self, LGBMClassifier):
                original_metric = "multi_logloss" if self._n_classes > 2 else "binary_logloss"
            elif isinstance(self, LGBMRanker):
                original_metric = "ndcg"

        # overwrite default metric by explicitly set metric
        params = _choose_param_value("metric", params, original_metric)

        # concatenate metric from params (or default if not provided in params) and eval_metric
        params['metric'] = [params['metric']] if isinstance(params['metric'], (str, type(None))) else params['metric']
        params['metric'] = [e for e in eval_metrics_builtin if e not in params['metric']] + params['metric']
        params['metric'] = [metric for metric in params['metric'] if metric is not None]

        if not isinstance(X, (pd_DataFrame, dt_DataTable)):
            _X, _y = _LGBMCheckXY(X, y, accept_sparse=True, force_all_finite=False, ensure_min_samples=2)
            if sample_weight is not None:
                sample_weight = _LGBMCheckSampleWeight(sample_weight, _X)
        else:
            _X, _y = X, y

        if self._class_weight is None:
            self._class_weight = self.class_weight
        if self._class_weight is not None:
            class_sample_weight = _LGBMComputeSampleWeight(self._class_weight, y)
            if sample_weight is None or len(sample_weight) == 0:
                sample_weight = class_sample_weight
            else:
                sample_weight = np.multiply(sample_weight, class_sample_weight)

        self._n_features = _X.shape[1]
        # copy for consistency
        self._n_features_in = self._n_features

        def _construct_dataset(X, y, sample_weight, init_score, group, params,
                               categorical_feature='auto'):
            return Dataset(X, label=y, weight=sample_weight, group=group,
                           init_score=init_score, params=params,
                           categorical_feature=categorical_feature)

        train_set = _construct_dataset(_X, _y, sample_weight, init_score, group, params,
                                       categorical_feature=categorical_feature)

        valid_sets = []
        if eval_set is not None:

            def _get_meta_data(collection, name, i):
                if collection is None:
                    return None
                elif isinstance(collection, list):
                    return collection[i] if len(collection) > i else None
                elif isinstance(collection, dict):
                    return collection.get(i, None)
                else:
                    raise TypeError(f"{name} should be dict or list")

            if isinstance(eval_set, tuple):
                eval_set = [eval_set]
            for i, valid_data in enumerate(eval_set):
                # reuse the training Dataset when the validation set is the training
                # data itself, to avoid the cost of constructing it twice
                if valid_data[0] is X and valid_data[1] is y:
                    valid_set = train_set
                else:
                    valid_weight = _get_meta_data(eval_sample_weight, 'eval_sample_weight', i)
                    valid_class_weight = _get_meta_data(eval_class_weight, 'eval_class_weight', i)
                    if valid_class_weight is not None:
                        if isinstance(valid_class_weight, dict) and self._class_map is not None:
                            valid_class_weight = {self._class_map[k]: v for k, v in valid_class_weight.items()}
                        valid_class_sample_weight = _LGBMComputeSampleWeight(valid_class_weight, valid_data[1])
                        if valid_weight is None or len(valid_weight) == 0:
                            valid_weight = valid_class_sample_weight
                        else:
                            valid_weight = np.multiply(valid_weight, valid_class_sample_weight)
                    valid_init_score = _get_meta_data(eval_init_score, 'eval_init_score', i)
                    valid_group = _get_meta_data(eval_group, 'eval_group', i)
                    valid_set = _construct_dataset(valid_data[0], valid_data[1],
                                                   valid_weight, valid_init_score, valid_group, params)
                valid_sets.append(valid_set)

        if isinstance(init_model, LGBMModel):
            init_model = init_model.booster_

        if early_stopping_rounds is not None and early_stopping_rounds > 0:
            _log_warning("'early_stopping_rounds' argument is deprecated and will be removed in a future release of LightGBM. "
                         "Pass 'early_stopping()' callback via 'callbacks' argument instead.")
            params['early_stopping_rounds'] = early_stopping_rounds

        if callbacks is None:
            callbacks = []
        else:
            callbacks = copy.copy(callbacks)  # don't use deepcopy here to allow non-serializable objects

        if verbose != 'warn':
            _log_warning("'verbose' argument is deprecated and will be removed in a future release of LightGBM. "
                         "Pass 'log_evaluation()' callback via 'callbacks' argument instead.")
        else:
            if callbacks:  # assume user has already specified log_evaluation callback
                verbose = False
            else:
                verbose = True
        callbacks.append(log_evaluation(int(verbose)))

        evals_result = {}
        callbacks.append(record_evaluation(evals_result))

        self._Booster = train(
            params=params,
            train_set=train_set,
            num_boost_round=self.n_estimators,
            valid_sets=valid_sets,
            valid_names=eval_names,
            fobj=self._fobj,
            feval=eval_metrics_callable,
            init_model=init_model,
            feature_name=feature_name,
            callbacks=callbacks
        )

        if evals_result:
            self._evals_result = evals_result
        else:  # reset after previous call to fit()
            self._evals_result = None

        if self._Booster.best_iteration != 0:
            self._best_iteration = self._Booster.best_iteration
        else:  # reset after previous call to fit()
            self._best_iteration = None

        self._best_score = self._Booster.best_score

        self.fitted_ = True

        # free dataset
        self._Booster.free_dataset()
        del train_set, valid_sets
        return self

    fit.__doc__ = _lgbmmodel_doc_fit.format(
        X_shape="array-like or sparse matrix of shape = [n_samples, n_features]",
        y_shape="array-like of shape = [n_samples]",
        sample_weight_shape="array-like of shape = [n_samples] or None, optional (default=None)",
        init_score_shape="array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task) or shape = [n_samples, n_classes] (for multi-class task) or None, optional (default=None)",
        group_shape="array-like or None, optional (default=None)",
        eval_sample_weight_shape="list of array, or None, optional (default=None)",
        eval_init_score_shape="list of array, or None, optional (default=None)",
        eval_group_shape="list of array, or None, optional (default=None)"
    ) + "\n\n" + _lgbmmodel_doc_custom_eval_note

    def predict(self, X, raw_score=False, start_iteration=0, num_iteration=None,
                pred_leaf=False, pred_contrib=False, **kwargs):
        """Docstring is set after definition, using a template."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("Estimator not fitted, call fit before exploiting the model.")
        if not isinstance(X, (pd_DataFrame, dt_DataTable)):
            X = _LGBMCheckArray(X, accept_sparse=True, force_all_finite=False)
        n_features = X.shape[1]
        if self._n_features != n_features:
            raise ValueError("Number of features of the model must "
                             f"match the input. Model n_features_ is {self._n_features} and "
                             f"input n_features is {n_features}")
        return self._Booster.predict(X, raw_score=raw_score, start_iteration=start_iteration, num_iteration=num_iteration,
                                     pred_leaf=pred_leaf, pred_contrib=pred_contrib, **kwargs)
    predict.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted value for each sample.",
        X_shape="array-like or sparse matrix of shape = [n_samples, n_features]",
        output_name="predicted_result",
        predicted_result_shape="array-like of shape = [n_samples] or shape = [n_samples, n_classes]",
        X_leaves_shape="array-like of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]",
        X_SHAP_values_shape="array-like of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes] or list with n_classes length of such objects"
    )
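
    # Illustrative sketch continuing the fit example above (hypothetical ``reg`` and
    # ``X_val``): with pred_contrib=True the result carries one extra column holding
    # the expected value, and each row sums to the raw-score prediction.
    #
    #     contribs = reg.predict(X_val, pred_contrib=True)  # shape (n_samples, n_features + 1)
    #     raw = reg.predict(X_val, raw_score=True)
    #     np.allclose(contribs.sum(axis=1), raw)  # -> True, up to float tolerance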

    @property
    def n_features_(self):
        """:obj:`int`: The number of features of fitted model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No n_features found. Need to call fit beforehand.')
        return self._n_features

    @property
    def n_features_in_(self):
        """:obj:`int`: The number of features of fitted model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No n_features_in found. Need to call fit beforehand.')
        return self._n_features_in

    @property
    def best_score_(self):
        """:obj:`dict`: The best score of fitted model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No best_score found. Need to call fit beforehand.')
        return self._best_score

    @property
    def best_iteration_(self):
        """:obj:`int` or :obj:`None`: The best iteration of fitted model if ``early_stopping()`` callback has been specified."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No best_iteration found. Need to call fit with early_stopping callback beforehand.')
        return self._best_iteration

    @property
    def objective_(self):
        """:obj:`str` or :obj:`callable`: The concrete objective used while fitting this model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No objective found. Need to call fit beforehand.')
        return self._objective

    @property
    def n_estimators_(self) -> int:
        """:obj:`int`: True number of boosting iterations performed.

        This might be less than parameter ``n_estimators`` if early stopping was enabled or
        if boosting stopped early due to limits on complexity like ``min_gain_to_split``.
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No n_estimators found. Need to call fit beforehand.')
        return self._Booster.current_iteration()

    @property
    def n_iter_(self) -> int:
        """:obj:`int`: True number of boosting iterations performed.

        This might be less than parameter ``n_estimators`` if early stopping was enabled or
        if boosting stopped early due to limits on complexity like ``min_gain_to_split``.
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No n_iter found. Need to call fit beforehand.')
        return self._Booster.current_iteration()

    @property
    def booster_(self):
        """Booster: The underlying Booster of this model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No booster found. Need to call fit beforehand.')
        return self._Booster

    @property
    def evals_result_(self):
        """:obj:`dict` or :obj:`None`: The evaluation results if validation sets have been specified."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No results found. Need to call fit with eval_set beforehand.')
        return self._evals_result

    @property
    def feature_importances_(self):
        """:obj:`array` of shape = [n_features]: The feature importances (the higher, the more important).

        .. note::

            ``importance_type`` attribute is passed to the function
            to configure the type of importance values to be extracted.
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No feature_importances found. Need to call fit beforehand.')
        return self._Booster.feature_importance(importance_type=self.importance_type)

    @property
    def feature_name_(self):
        """:obj:`array` of shape = [n_features]: The names of features."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No feature_name found. Need to call fit beforehand.')
        return self._Booster.feature_name()

class LGBMRegressor(_LGBMRegressorBase, LGBMModel):
    """LightGBM regressor."""

    def fit(self, X, y,
            sample_weight=None, init_score=None,
            eval_set=None, eval_names=None, eval_sample_weight=None,
            eval_init_score=None, eval_metric=None, early_stopping_rounds=None,
            verbose='warn', feature_name='auto', categorical_feature='auto',
            callbacks=None, init_model=None):
        """Docstring is inherited from the LGBMModel."""
        super().fit(X, y, sample_weight=sample_weight, init_score=init_score,
                    eval_set=eval_set, eval_names=eval_names, eval_sample_weight=eval_sample_weight,
                    eval_init_score=eval_init_score, eval_metric=eval_metric,
                    early_stopping_rounds=early_stopping_rounds, verbose=verbose, feature_name=feature_name,
                    categorical_feature=categorical_feature, callbacks=callbacks, init_model=init_model)
        return self

    _base_doc = LGBMModel.fit.__doc__.replace("self : LGBMModel", "self : LGBMRegressor")
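    # LGBMRegressor takes no ranking ('group', 'eval_group') or classification
    # ('eval_class_weight') arguments, so the slices below drop those entries
    # from the inherited fit docstring.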
    _base_doc = (_base_doc[:_base_doc.find('group :')]  # type: ignore
                 + _base_doc[_base_doc.find('eval_set :'):])  # type: ignore
    _base_doc = (_base_doc[:_base_doc.find('eval_class_weight :')]
                 + _base_doc[_base_doc.find('eval_init_score :'):])
    fit.__doc__ = (_base_doc[:_base_doc.find('eval_group :')]
                   + _base_doc[_base_doc.find('eval_metric :'):])


class LGBMClassifier(_LGBMClassifierBase, LGBMModel):
    """LightGBM classifier."""

    def fit(self, X, y,
            sample_weight=None, init_score=None,
            eval_set=None, eval_names=None, eval_sample_weight=None,
            eval_class_weight=None, eval_init_score=None, eval_metric=None,
            early_stopping_rounds=None, verbose='warn',
            feature_name='auto', categorical_feature='auto',
            callbacks=None, init_model=None):
        """Docstring is inherited from the LGBMModel."""
        _LGBMAssertAllFinite(y)
        _LGBMCheckClassificationTargets(y)
        self._le = _LGBMLabelEncoder().fit(y)
        _y = self._le.transform(y)
        self._class_map = dict(zip(self._le.classes_, self._le.transform(self._le.classes_)))
        if isinstance(self.class_weight, dict):
            self._class_weight = {self._class_map[k]: v for k, v in self.class_weight.items()}

        self._classes = self._le.classes_
        self._n_classes = len(self._classes)

        if not callable(eval_metric):
            if isinstance(eval_metric, (str, type(None))):
                eval_metric = [eval_metric]
            if self._n_classes > 2:
                for index, metric in enumerate(eval_metric):
                    if metric in {'logloss', 'binary_logloss'}:
                        eval_metric[index] = "multi_logloss"
                    elif metric in {'error', 'binary_error'}:
                        eval_metric[index] = "multi_error"
            else:
                for index, metric in enumerate(eval_metric):
                    if metric in {'logloss', 'multi_logloss'}:
                        eval_metric[index] = 'binary_logloss'
                    elif metric in {'error', 'multi_error'}:
                        eval_metric[index] = 'binary_error'
        # do not modify args, as it causes errors in model selection tools
        valid_sets = None
        if eval_set is not None:
            if isinstance(eval_set, tuple):
                eval_set = [eval_set]
            valid_sets = [None] * len(eval_set)
            for i, (valid_x, valid_y) in enumerate(eval_set):
                if valid_x is X and valid_y is y:
                    valid_sets[i] = (valid_x, _y)
                else:
                    valid_sets[i] = (valid_x, self._le.transform(valid_y))

        super().fit(X, _y, sample_weight=sample_weight, init_score=init_score, eval_set=valid_sets,
                    eval_names=eval_names, eval_sample_weight=eval_sample_weight,
                    eval_class_weight=eval_class_weight, eval_init_score=eval_init_score,
                    eval_metric=eval_metric, early_stopping_rounds=early_stopping_rounds,
                    verbose=verbose, feature_name=feature_name, categorical_feature=categorical_feature,
                    callbacks=callbacks, init_model=init_model)
        return self

    _base_doc = LGBMModel.fit.__doc__.replace("self : LGBMModel", "self : LGBMClassifier")
    _base_doc = (_base_doc[:_base_doc.find('group :')]  # type: ignore
                 + _base_doc[_base_doc.find('eval_set :'):])  # type: ignore
    fit.__doc__ = (_base_doc[:_base_doc.find('eval_group :')]
                   + _base_doc[_base_doc.find('eval_metric :'):])

    def predict(self, X, raw_score=False, start_iteration=0, num_iteration=None,
                pred_leaf=False, pred_contrib=False, **kwargs):
        """Docstring is inherited from the LGBMModel."""
        result = self.predict_proba(X, raw_score, start_iteration, num_iteration,
                                    pred_leaf, pred_contrib, **kwargs)
        if callable(self._objective) or raw_score or pred_leaf or pred_contrib:
            return result
        else:
            class_index = np.argmax(result, axis=1)
            return self._le.inverse_transform(class_index)
    predict.__doc__ = LGBMModel.predict.__doc__

    def predict_proba(self, X, raw_score=False, start_iteration=0, num_iteration=None,
                      pred_leaf=False, pred_contrib=False, **kwargs):
        """Docstring is set after definition, using a template."""
        result = super().predict(X, raw_score, start_iteration, num_iteration, pred_leaf, pred_contrib, **kwargs)
        if callable(self._objective) and not (raw_score or pred_leaf or pred_contrib):
            _log_warning("Cannot compute class probabilities or labels "
                         "due to the usage of customized objective function.\n"
                         "Returning raw scores instead.")
            return result
        elif self._n_classes > 2 or raw_score or pred_leaf or pred_contrib:
            return result
        else:
            return np.vstack((1. - result, result)).transpose()
    predict_proba.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted probability for each class for each sample.",
        X_shape="array-like or sparse matrix of shape = [n_samples, n_features]",
        output_name="predicted_probability",
        predicted_result_shape="array-like of shape = [n_samples] or shape = [n_samples, n_classes]",
        X_leaves_shape="array-like of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]",
        X_SHAP_values_shape="array-like of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes] or list with n_classes length of such objects"
    )
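
    # Illustrative sketch (hypothetical ``X_tr``/``y_tr``/``X_val`` arrays, not part of
    # this module): for a binary model predict_proba() stacks (1 - p, p) columnwise,
    # while predict() takes the argmax and maps indices back to the original labels.
    #
    #     clf = LGBMClassifier(n_estimators=10).fit(X_tr, y_tr)
    #     proba = clf.predict_proba(X_val)  # shape (n_samples, 2), rows sum to 1
    #     labels = clf.predict(X_val)       # original class labels, argmax of proba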

    @property
    def classes_(self):
        """:obj:`array` of shape = [n_classes]: The class label array."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No classes found. Need to call fit beforehand.')
        return self._classes

    @property
    def n_classes_(self):
        """:obj:`int`: The number of classes."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No classes found. Need to call fit beforehand.')
        return self._n_classes


class LGBMRanker(LGBMModel):
    """LightGBM ranker.

    .. warning::

        scikit-learn doesn't support ranking applications yet,
        therefore this class is not really compatible with the sklearn ecosystem.
        Please use this class mainly for training and applying ranking models in a common sklearn-like way.
    """

    def fit(self, X, y,
            sample_weight=None, init_score=None, group=None,
            eval_set=None, eval_names=None, eval_sample_weight=None,
            eval_init_score=None, eval_group=None, eval_metric=None,
            eval_at=(1, 2, 3, 4, 5), early_stopping_rounds=None, verbose='warn',
            feature_name='auto', categorical_feature='auto',
            callbacks=None, init_model=None):
        """Docstring is inherited from the LGBMModel."""
        # check group data
        if group is None:
            raise ValueError("Should set group for ranking task")

        if eval_set is not None:
            if eval_group is None:
                raise ValueError("Eval_group cannot be None when eval_set is not None")
            elif len(eval_group) != len(eval_set):
                raise ValueError("Length of eval_group should be equal to length of eval_set")
            elif (isinstance(eval_group, dict)
                  and any(i not in eval_group or eval_group[i] is None for i in range(len(eval_group)))
                  or isinstance(eval_group, list)
                  and any(group is None for group in eval_group)):
                raise ValueError("Should set group for all eval datasets for ranking task; "
                                 "if you use dict, the index should start from 0")

        self._eval_at = eval_at
        super().fit(X, y, sample_weight=sample_weight, init_score=init_score, group=group,
                    eval_set=eval_set, eval_names=eval_names, eval_sample_weight=eval_sample_weight,
                    eval_init_score=eval_init_score, eval_group=eval_group, eval_metric=eval_metric,
                    early_stopping_rounds=early_stopping_rounds, verbose=verbose, feature_name=feature_name,
                    categorical_feature=categorical_feature, callbacks=callbacks, init_model=init_model)
        return self

    _base_doc = LGBMModel.fit.__doc__.replace("self : LGBMModel", "self : LGBMRanker")
    fit.__doc__ = (_base_doc[:_base_doc.find('eval_class_weight :')]  # type: ignore
                   + _base_doc[_base_doc.find('eval_init_score :'):])  # type: ignore
    _base_doc = fit.__doc__
    _before_early_stop, _early_stop, _after_early_stop = _base_doc.partition('early_stopping_rounds :')
    fit.__doc__ = f"""{_before_early_stop}eval_at : iterable of int, optional (default=(1, 2, 3, 4, 5))
        The evaluation positions of the specified metric.
    {_early_stop}{_after_early_stop}"""
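

# Illustrative usage sketch (not part of this module; the random data is only for
# shape): 'group' gives per-query sizes and must sum to n_samples, e.g. 3 queries
# holding 4, 3 and 3 documents, with eval_group mirroring it for each eval_set entry.
#
#     import numpy as np
#     import lightgbm as lgb
#
#     X = np.random.rand(10, 5)
#     y = np.random.randint(0, 4, size=10)  # graded relevance labels
#     ranker = lgb.LGBMRanker(n_estimators=10).fit(
#         X, y, group=[4, 3, 3],
#         eval_set=[(X, y)], eval_group=[[4, 3, 3]], eval_at=(1, 3),
#     )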