# coding: utf-8
"""Scikit-learn wrapper interface for LightGBM."""
import copy
from inspect import signature
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import numpy as np

from .basic import (Booster, Dataset, LightGBMError, _choose_param_value, _ConfigAliases, _LGBM_BoosterBestScoreType,
                    _LGBM_EvalFunctionResultType, _log_warning)
from .callback import _EvalResultDict, record_evaluation
from .compat import (SKLEARN_INSTALLED, LGBMNotFittedError, _LGBMAssertAllFinite, _LGBMCheckArray,
                     _LGBMCheckClassificationTargets, _LGBMCheckSampleWeight, _LGBMCheckXY, _LGBMClassifierBase,
                     _LGBMComputeSampleWeight, _LGBMCpuCount, _LGBMLabelEncoder, _LGBMModelBase, _LGBMRegressorBase,
                     dt_DataTable, pd_DataFrame)
from .engine import train

__all__ = [
    'LGBMClassifier',
    'LGBMModel',
    'LGBMRanker',
    'LGBMRegressor',
]
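# Typical usage of the estimators defined below (an illustrative sketch; it assumes
# scikit-learn-style arrays ``X_train``, ``y_train``, ``X_valid`` and ``y_valid``):
#
#     from lightgbm import LGBMClassifier
#
#     clf = LGBMClassifier(n_estimators=100, learning_rate=0.1)
#     clf.fit(X_train, y_train, eval_set=[(X_valid, y_valid)], eval_metric='binary_logloss')
#     probabilities = clf.predict_proba(X_valid)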

_LGBM_ScikitCustomObjectiveFunction = Union[
    Callable[
        [np.ndarray, np.ndarray],
        Tuple[np.ndarray, np.ndarray]
    ],
    Callable[
        [np.ndarray, np.ndarray, np.ndarray],
        Tuple[np.ndarray, np.ndarray]
    ],
    Callable[
        [np.ndarray, np.ndarray, np.ndarray, np.ndarray],
        Tuple[np.ndarray, np.ndarray]
    ],
]
_LGBM_ScikitCustomEvalFunction = Union[
    Callable[
        [np.ndarray, np.ndarray],
        Union[_LGBM_EvalFunctionResultType, List[_LGBM_EvalFunctionResultType]]
    ],
    Callable[
        [np.ndarray, np.ndarray, np.ndarray],
        Union[_LGBM_EvalFunctionResultType, List[_LGBM_EvalFunctionResultType]]
    ],
    Callable[
        [np.ndarray, np.ndarray, np.ndarray, np.ndarray],
        Union[_LGBM_EvalFunctionResultType, List[_LGBM_EvalFunctionResultType]]
    ],
]
_LGBM_ScikitEvalMetricType = Union[
    str,
    _LGBM_ScikitCustomEvalFunction,
    List[Union[str, _LGBM_ScikitCustomEvalFunction]]
]


class _ObjectiveFunctionWrapper:
    """Proxy class for objective function."""

    def __init__(self, func: _LGBM_ScikitCustomObjectiveFunction):
        """Construct a proxy class.

        This class transforms an objective function to match the ``new_func(preds, dataset)``
        signature expected by ``lightgbm.engine.train``.

        Parameters
        ----------
        func : callable
            Expects a callable with one of the following signatures:
            ``func(y_true, y_pred)``,
            ``func(y_true, y_pred, weight)``
            or ``func(y_true, y_pred, weight, group)``
            and returns (grad, hess):

                y_true : numpy 1-D array of shape = [n_samples]
                    The target values.
                y_pred : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                    The predicted values.
                    Predicted values are returned before any transformation,
                    e.g. they are raw margin instead of probability of positive class for binary task.
                weight : numpy 1-D array of shape = [n_samples]
                    The weight of samples. Weights should be non-negative.
                group : numpy 1-D array
                    Group/query data.
                    Only used in the learning-to-rank task.
                    sum(group) = n_samples.
                    For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
                    where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
                grad : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                    The value of the first order derivative (gradient) of the loss
                    with respect to the elements of y_pred for each sample point.
                hess : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                    The value of the second order derivative (Hessian) of the loss
                    with respect to the elements of y_pred for each sample point.

        .. note::

            For multi-class task, y_pred is a numpy 2-D array of shape = [n_samples, n_classes],
            and grad and hess should be returned in the same format.
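
        A minimal illustrative objective (a sketch, not part of the library), using
        the two-argument signature above: half of the squared error, whose gradient
        is ``y_pred - y_true`` and whose Hessian is constant::

            def l2_objective(y_true, y_pred):
                grad = y_pred - y_true
                hess = np.ones_like(y_pred)
                return grad, hess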
        """
        self.func = func

    def __call__(self, preds: np.ndarray, dataset: Dataset) -> Tuple[np.ndarray, np.ndarray]:
        """Call passed function with appropriate arguments.

        Parameters
        ----------
        preds : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The predicted values.
        dataset : Dataset
            The training dataset.

        Returns
        -------
        grad : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The value of the first order derivative (gradient) of the loss
            with respect to the elements of preds for each sample point.
        hess : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The value of the second order derivative (Hessian) of the loss
            with respect to the elements of preds for each sample point.
        """
        labels = dataset.get_label()
        argc = len(signature(self.func).parameters)
        if argc == 2:
            grad, hess = self.func(labels, preds)
        elif argc == 3:
            grad, hess = self.func(labels, preds, dataset.get_weight())
        elif argc == 4:
            grad, hess = self.func(labels, preds, dataset.get_weight(), dataset.get_group())
        else:
            raise TypeError(f"Self-defined objective function should have 2, 3 or 4 arguments, got {argc}")
        return grad, hess


class _EvalFunctionWrapper:
    """Proxy class for evaluation function."""

    def __init__(self, func: _LGBM_ScikitCustomEvalFunction):
        """Construct a proxy class.

        This class transforms an evaluation function to match the ``new_func(preds, dataset)``
        signature expected by ``lightgbm.engine.train``.

        Parameters
        ----------
        func : callable
            Expects a callable with one of the following signatures:
            ``func(y_true, y_pred)``,
            ``func(y_true, y_pred, weight)``
            or ``func(y_true, y_pred, weight, group)``
            and returns (eval_name, eval_result, is_higher_better) or
            list of (eval_name, eval_result, is_higher_better):

                y_true : numpy 1-D array of shape = [n_samples]
                    The target values.
                y_pred : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                    The predicted values.
                    In case of custom ``objective``, predicted values are returned before any transformation,
                    e.g. they are raw margin instead of probability of positive class for binary task in this case.
                weight : numpy 1-D array of shape = [n_samples]
                    The weight of samples. Weights should be non-negative.
                group : numpy 1-D array
                    Group/query data.
                    Only used in the learning-to-rank task.
                    sum(group) = n_samples.
                    For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
                    where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
                eval_name : str
                    The name of evaluation function (without whitespace).
                eval_result : float
                    The eval result.
                is_higher_better : bool
                    Is eval result higher better, e.g. AUC is ``is_higher_better``.
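
        A minimal illustrative metric (a sketch, not part of the library), using the
        two-argument signature above: RMSE, for which lower values are better::

            def rmse(y_true, y_pred):
                return 'rmse', float(np.sqrt(np.mean((y_pred - y_true) ** 2))), False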
        """
        self.func = func
180

181
182
183
184
185
    def __call__(
        self,
        preds: np.ndarray,
        dataset: Dataset
    ) -> Union[_LGBM_EvalFunctionResultType, List[_LGBM_EvalFunctionResultType]]:
        """Call passed function with appropriate arguments.

        Parameters
        ----------
        preds : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The predicted values.
        dataset : Dataset
            The training dataset.

        Returns
        -------
        eval_name : str
            The name of evaluation function (without whitespace).
        eval_result : float
            The eval result.
        is_higher_better : bool
            Is eval result higher better, e.g. AUC is ``is_higher_better``.
        """
        labels = dataset.get_label()
        argc = len(signature(self.func).parameters)
        if argc == 2:
            return self.func(labels, preds)
        elif argc == 3:
            return self.func(labels, preds, dataset.get_weight())
        elif argc == 4:
            return self.func(labels, preds, dataset.get_weight(), dataset.get_group())
        else:
            raise TypeError(f"Self-defined eval function should have 2, 3 or 4 arguments, got {argc}")


# documentation templates for LGBMModel methods are shared between the classes in
# this module and those in the ``dask`` module

_lgbmmodel_doc_fit = (
    """
    Build a gradient boosting model from the training set (X, y).

    Parameters
    ----------
    X : {X_shape}
        Input feature matrix.
    y : {y_shape}
        The target values (class labels in classification, real numbers in regression).
    sample_weight : {sample_weight_shape}
        Weights of training data. Weights should be non-negative.
    init_score : {init_score_shape}
        Init score of training data.
    group : {group_shape}
        Group/query data.
        Only used in the learning-to-rank task.
        sum(group) = n_samples.
        For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
        where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
    eval_set : list or None, optional (default=None)
        A list of (X, y) tuple pairs to use as validation sets.
    eval_names : list of str, or None, optional (default=None)
        Names of eval_set.
    eval_sample_weight : {eval_sample_weight_shape}
        Weights of eval data. Weights should be non-negative.
    eval_class_weight : list or None, optional (default=None)
        Class weights of eval data.
    eval_init_score : {eval_init_score_shape}
        Init score of eval data.
    eval_group : {eval_group_shape}
        Group data of eval data.
    eval_metric : str, callable, list or None, optional (default=None)
        If str, it should be a built-in evaluation metric to use.
        If callable, it should be a custom evaluation metric, see note below for more details.
        If list, it can be a list of built-in metrics, a list of custom evaluation metrics, or a mix of both.
        In either case, the ``metric`` from the model parameters will be evaluated and used as well.
        Default: 'l2' for LGBMRegressor, 'logloss' for LGBMClassifier, 'ndcg' for LGBMRanker.
    feature_name : list of str, or 'auto', optional (default='auto')
        Feature names.
        If 'auto' and data is pandas DataFrame, data column names are used.
    categorical_feature : list of str or int, or 'auto', optional (default='auto')
        Categorical features.
        If list of int, interpreted as indices.
        If list of str, interpreted as feature names (need to specify ``feature_name`` as well).
        If 'auto' and data is pandas DataFrame, pandas unordered categorical columns are used.
        All values in categorical features will be cast to int32 and thus should be less than int32 max value (2147483647).
        Large values could be memory consuming. Consider using consecutive integers starting from zero.
        All negative values in categorical features will be treated as missing values.
        The output cannot be monotonically constrained with respect to a categorical feature.
        Floating point numbers in categorical features will be rounded towards 0.
    callbacks : list of callable, or None, optional (default=None)
        List of callback functions that are applied at each iteration.
        See Callbacks in Python API for more information.
    init_model : str, pathlib.Path, Booster, LGBMModel or None, optional (default=None)
        Filename of LightGBM model, Booster instance or LGBMModel instance used to continue training.

    Returns
    -------
    self : LGBMModel
        Returns self.
    """
)

_lgbmmodel_doc_custom_eval_note = """
    Note
    ----
    Custom eval function expects a callable with following signatures:
    ``func(y_true, y_pred)``, ``func(y_true, y_pred, weight)`` or
    ``func(y_true, y_pred, weight, group)``
    and returns (eval_name, eval_result, is_higher_better) or
    list of (eval_name, eval_result, is_higher_better):

        y_true : numpy 1-D array of shape = [n_samples]
            The target values.
        y_pred : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The predicted values.
            In case of custom ``objective``, predicted values are returned before any transformation,
            e.g. they are raw margin instead of probability of positive class for binary task in this case.
        weight : numpy 1-D array of shape = [n_samples]
            The weight of samples. Weights should be non-negative.
        group : numpy 1-D array
            Group/query data.
            Only used in the learning-to-rank task.
            sum(group) = n_samples.
            For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
            where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
        eval_name : str
            The name of evaluation function (without whitespace).
        eval_result : float
            The eval result.
        is_higher_better : bool
            Is eval result higher better, e.g. AUC is ``is_higher_better``.
"""

_lgbmmodel_doc_predict = (
    """
    {description}

    Parameters
    ----------
    X : {X_shape}
        Input features matrix.
    raw_score : bool, optional (default=False)
        Whether to predict raw scores.
    start_iteration : int, optional (default=0)
        Start index of the iteration to predict.
        If <= 0, starts from the first iteration.
    num_iteration : int or None, optional (default=None)
        Total number of iterations used in the prediction.
        If None, if the best iteration exists and start_iteration <= 0, the best iteration is used;
        otherwise, all iterations from ``start_iteration`` are used (no limits).
        If <= 0, all iterations from ``start_iteration`` are used (no limits).
    pred_leaf : bool, optional (default=False)
        Whether to predict leaf index.
    pred_contrib : bool, optional (default=False)
        Whether to predict feature contributions.

        .. note::

            If you want to get more explanations for your model's predictions using SHAP values,
            like SHAP interaction values,
            you can install the shap package (https://github.com/slundberg/shap).
            Note that unlike the shap package, with ``pred_contrib`` we return a matrix with an extra
            column, where the last column is the expected value.
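            For example, with a binary classification task and 10 input features, each row
            of the returned matrix has ``10 + 1 = 11`` values, the last one being the
            expected value.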

    validate_features : bool, optional (default=False)
        If True, ensure that the features used to predict match the ones used to train.
        Used only if data is pandas DataFrame.
    **kwargs
        Other parameters for the prediction.

    Returns
    -------
    {output_name} : {predicted_result_shape}
        The predicted values.
    X_leaves : {X_leaves_shape}
        If ``pred_leaf=True``, the predicted leaf of every tree for each sample.
    X_SHAP_values : {X_SHAP_values_shape}
        If ``pred_contrib=True``, the feature contributions for each sample.
    """
)


class LGBMModel(_LGBMModelBase):
    """Implementation of the scikit-learn API for LightGBM."""

    def __init__(
        self,
        boosting_type: str = 'gbdt',
        num_leaves: int = 31,
        max_depth: int = -1,
        learning_rate: float = 0.1,
        n_estimators: int = 100,
        subsample_for_bin: int = 200000,
        objective: Optional[Union[str, _LGBM_ScikitCustomObjectiveFunction]] = None,
        class_weight: Optional[Union[Dict, str]] = None,
        min_split_gain: float = 0.,
        min_child_weight: float = 1e-3,
        min_child_samples: int = 20,
        subsample: float = 1.,
        subsample_freq: int = 0,
        colsample_bytree: float = 1.,
        reg_alpha: float = 0.,
        reg_lambda: float = 0.,
        random_state: Optional[Union[int, np.random.RandomState]] = None,
        n_jobs: Optional[int] = None,
        importance_type: str = 'split',
        **kwargs
    ):
        r"""Construct a gradient boosting model.

        Parameters
        ----------
        boosting_type : str, optional (default='gbdt')
            'gbdt', traditional Gradient Boosting Decision Tree.
            'dart', Dropouts meet Multiple Additive Regression Trees.
            'rf', Random Forest.
        num_leaves : int, optional (default=31)
            Maximum tree leaves for base learners.
        max_depth : int, optional (default=-1)
            Maximum tree depth for base learners, <=0 means no limit.
        learning_rate : float, optional (default=0.1)
            Boosting learning rate.
            You can use ``callbacks`` parameter of ``fit`` method to shrink/adapt learning rate
            in training using ``reset_parameter`` callback.
            Note that this will ignore the ``learning_rate`` argument in training.
        n_estimators : int, optional (default=100)
            Number of boosted trees to fit.
        subsample_for_bin : int, optional (default=200000)
            Number of samples for constructing bins.
        objective : str, callable or None, optional (default=None)
            Specify the learning task and the corresponding learning objective or
            a custom objective function to be used (see note below).
            Default: 'regression' for LGBMRegressor, 'binary' or 'multiclass' for LGBMClassifier, 'lambdarank' for LGBMRanker.
        class_weight : dict, 'balanced' or None, optional (default=None)
            Weights associated with classes in the form ``{class_label: weight}``.
            Use this parameter only for multi-class classification task;
            for binary classification task you may use ``is_unbalance`` or ``scale_pos_weight`` parameters.
            Note that the usage of all these parameters will result in poor estimates of the individual class probabilities.
            You may want to consider performing probability calibration
            (https://scikit-learn.org/stable/modules/calibration.html) of your model.
            The 'balanced' mode uses the values of y to automatically adjust weights
            inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))``.
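            For example, with ``y = [0, 0, 0, 1]`` this gives ``4 / (2 * 3)`` (about 0.67) for class 0
            and ``4 / (2 * 1) = 2.0`` for class 1.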
            If None, all classes are supposed to have weight one.
            Note that these weights will be multiplied with ``sample_weight`` (passed through the ``fit`` method)
            if ``sample_weight`` is specified.
        min_split_gain : float, optional (default=0.)
            Minimum loss reduction required to make a further partition on a leaf node of the tree.
        min_child_weight : float, optional (default=1e-3)
            Minimum sum of instance weight (Hessian) needed in a child (leaf).
        min_child_samples : int, optional (default=20)
            Minimum number of data needed in a child (leaf).
        subsample : float, optional (default=1.)
            Subsample ratio of the training instance.
        subsample_freq : int, optional (default=0)
            Frequency of subsampling, <=0 means subsampling is disabled.
        colsample_bytree : float, optional (default=1.)
            Subsample ratio of columns when constructing each tree.
        reg_alpha : float, optional (default=0.)
            L1 regularization term on weights.
        reg_lambda : float, optional (default=0.)
            L2 regularization term on weights.
        random_state : int, RandomState object or None, optional (default=None)
            Random number seed.
            If int, this number is used to seed the C++ code.
            If RandomState object (numpy), a random integer is picked based on its state to seed the C++ code.
            If None, default seeds in C++ code are used.
        n_jobs : int or None, optional (default=None)
            Number of parallel threads to use for training (can be changed at prediction time by
            passing it as an extra keyword argument).

            For better performance, it is recommended to set this to the number of physical cores
            in the CPU.

            Negative integers are interpreted as following joblib's formula (n_cpus + 1 + n_jobs), just like
            scikit-learn (so e.g. -1 means using all threads). A value of zero corresponds to the default number of
            threads configured for OpenMP in the system. A value of ``None`` (the default) corresponds
            to using the number of physical cores in the system (its correct detection requires
            either the ``joblib`` or the ``psutil`` util libraries to be installed).
        importance_type : str, optional (default='split')
            The type of feature importance to be filled into ``feature_importances_``.
            If 'split', result contains numbers of times the feature is used in a model.
            If 'gain', result contains total gains of splits which use the feature.
        **kwargs
            Other parameters for the model.
            Check http://lightgbm.readthedocs.io/en/latest/Parameters.html for more parameters.

            .. warning::

                \*\*kwargs is not supported in sklearn, it may cause unexpected issues.

        Note
        ----
        A custom objective function can be provided for the ``objective`` parameter.
        In this case, it should have the signature
        ``objective(y_true, y_pred) -> grad, hess``,
        ``objective(y_true, y_pred, weight) -> grad, hess``
        or ``objective(y_true, y_pred, weight, group) -> grad, hess``:

            y_true : numpy 1-D array of shape = [n_samples]
                The target values.
            y_pred : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                The predicted values.
                Predicted values are returned before any transformation,
                e.g. they are raw margin instead of probability of positive class for binary task.
            weight : numpy 1-D array of shape = [n_samples]
                The weight of samples. Weights should be non-negative.
            group : numpy 1-D array
                Group/query data.
                Only used in the learning-to-rank task.
                sum(group) = n_samples.
                For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
                where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
            grad : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                The value of the first order derivative (gradient) of the loss
                with respect to the elements of y_pred for each sample point.
            hess : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                The value of the second order derivative (Hessian) of the loss
                with respect to the elements of y_pred for each sample point.

        For multi-class task, y_pred is a numpy 2-D array of shape = [n_samples, n_classes],
        and grad and hess should be returned in the same format.
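
        The callable is passed via the ``objective`` parameter itself, e.g.
        ``LGBMRegressor(objective=my_objective)``, where ``my_objective`` is an
        illustrative name for any callable with one of the signatures above.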
        """
        if not SKLEARN_INSTALLED:
            raise LightGBMError('scikit-learn is required for lightgbm.sklearn. '
                                'You must install scikit-learn and restart your session to use this module.')

        self.boosting_type = boosting_type
        self.objective = objective
        self.num_leaves = num_leaves
        self.max_depth = max_depth
        self.learning_rate = learning_rate
        self.n_estimators = n_estimators
        self.subsample_for_bin = subsample_for_bin
        self.min_split_gain = min_split_gain
        self.min_child_weight = min_child_weight
        self.min_child_samples = min_child_samples
        self.subsample = subsample
        self.subsample_freq = subsample_freq
        self.colsample_bytree = colsample_bytree
        self.reg_alpha = reg_alpha
        self.reg_lambda = reg_lambda
        self.random_state = random_state
        self.n_jobs = n_jobs
        self.importance_type = importance_type
        self._Booster: Optional[Booster] = None
        self._evals_result: _EvalResultDict = {}
        self._best_score: _LGBM_BoosterBestScoreType = {}
        self._best_iteration: int = -1
        self._other_params: Dict[str, Any] = {}
        self._objective = objective
        self.class_weight = class_weight
        self._class_weight: Optional[Union[Dict, str]] = None
        self._class_map: Optional[Dict[int, int]] = None
        self._n_features: int = -1
        self._n_features_in: int = -1
        self._classes = None
        self._n_classes: Optional[int] = None
        self.set_params(**kwargs)

    def _more_tags(self) -> Dict[str, Any]:
        return {
            'allow_nan': True,
            'X_types': ['2darray', 'sparse', '1dlabels'],
            '_xfail_checks': {
                'check_no_attributes_set_in_init':
                'scikit-learn incorrectly asserts that private attributes '
                'cannot be set in __init__: '
                '(see https://github.com/microsoft/LightGBM/issues/2628)'
            }
        }

    def __sklearn_is_fitted__(self) -> bool:
        return getattr(self, "fitted_", False)

    def get_params(self, deep: bool = True) -> Dict[str, Any]:
        """Get parameters for this estimator.

        Parameters
        ----------
        deep : bool, optional (default=True)
            If True, will return the parameters for this estimator and
            contained subobjects that are estimators.

        Returns
        -------
        params : dict
            Parameter names mapped to their values.
        """
        params = super().get_params(deep=deep)
        params.update(self._other_params)
        return params

    def set_params(self, **params: Any) -> "LGBMModel":
        """Set the parameters of this estimator.

        Parameters
        ----------
        **params
            Parameter names with their new values.

        Returns
        -------
        self : object
            Returns self.
        """
        for key, value in params.items():
            setattr(self, key, value)
            if hasattr(self, f"_{key}"):
                setattr(self, f"_{key}", value)
            self._other_params[key] = value
        return self

    def _process_params(self, stage: str) -> Dict[str, Any]:
        """Process the parameters of this estimator based on its type, parameter aliases, etc.

        Parameters
        ----------
        stage : str
            Name of the stage (can be ``fit`` or ``predict``) this method is called from.

        Returns
        -------
        processed_params : dict
            Processed parameter names mapped to their values.
        """
        assert stage in {"fit", "predict"}
        params = self.get_params()

        params.pop('objective', None)
        for alias in _ConfigAliases.get('objective'):
            if alias in params:
                obj = params.pop(alias)
                _log_warning(f"Found '{alias}' in params. Will use it instead of 'objective' argument")
                if stage == "fit":
                    self._objective = obj
        if stage == "fit":
            if self._objective is None:
                if isinstance(self, LGBMRegressor):
                    self._objective = "regression"
                elif isinstance(self, LGBMClassifier):
                    if self._n_classes > 2:
                        self._objective = "multiclass"
                    else:
                        self._objective = "binary"
                elif isinstance(self, LGBMRanker):
                    self._objective = "lambdarank"
                else:
                    raise ValueError("Unknown LGBMModel type.")
        if callable(self._objective):
            if stage == "fit":
                params['objective'] = _ObjectiveFunctionWrapper(self._objective)
            else:
                params['objective'] = 'None'
        else:
            params['objective'] = self._objective

        params.pop('importance_type', None)
        params.pop('n_estimators', None)
        params.pop('class_weight', None)

        if isinstance(params['random_state'], np.random.RandomState):
            params['random_state'] = params['random_state'].randint(np.iinfo(np.int32).max)
        if self._n_classes is not None and self._n_classes > 2:
            for alias in _ConfigAliases.get('num_class'):
                params.pop(alias, None)
            params['num_class'] = self._n_classes
        if hasattr(self, '_eval_at'):
            eval_at = self._eval_at
            for alias in _ConfigAliases.get('eval_at'):
                if alias in params:
                    _log_warning(f"Found '{alias}' in params. Will use it instead of 'eval_at' argument")
                    eval_at = params.pop(alias)
            params['eval_at'] = eval_at

        # register default metric for consistency with callable eval_metric case
        original_metric = self._objective if isinstance(self._objective, str) else None
        if original_metric is None:
            # try to deduce from class instance
            if isinstance(self, LGBMRegressor):
                original_metric = "l2"
            elif isinstance(self, LGBMClassifier):
                original_metric = "multi_logloss" if self._n_classes > 2 else "binary_logloss"
            elif isinstance(self, LGBMRanker):
                original_metric = "ndcg"

        # overwrite default metric by explicitly set metric
        params = _choose_param_value("metric", params, original_metric)

        # use joblib conventions for negative n_jobs, just like scikit-learn
        # at predict time, this is handled later due to the order of parameter updates
        if stage == "fit":
            params = _choose_param_value("num_threads", params, self.n_jobs)
            params["num_threads"] = self._process_n_jobs(params["num_threads"])

        return params

    def _process_n_jobs(self, n_jobs: Optional[int]) -> int:
        """Convert special values of n_jobs to their actual values according to the formulas that apply.

        Parameters
        ----------
        n_jobs : int or None
            The original value of n_jobs, potentially having special values such as 'None' or
            negative integers.

        Returns
        -------
        n_jobs : int
            The value of n_jobs with special values converted to actual number of threads.
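
        For example, on a machine that reports 8 logical cores (an illustrative count),
        ``n_jobs=-1`` maps to ``8 + 1 + (-1) = 8`` threads and ``n_jobs=-2`` to ``7``,
        following joblib's ``n_cpus + 1 + n_jobs`` convention.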
        """
        if n_jobs is None:
            n_jobs = _LGBMCpuCount(only_physical_cores=True)
        elif n_jobs < 0:
            n_jobs = max(_LGBMCpuCount(only_physical_cores=False) + 1 + n_jobs, 1)
        return n_jobs

    def fit(
        self,
        X,
        y,
        sample_weight=None,
        init_score=None,
        group=None,
        eval_set=None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight=None,
        eval_class_weight=None,
        eval_init_score=None,
        eval_group=None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        feature_name='auto',
        categorical_feature='auto',
        callbacks=None,
        init_model: Optional[Union[str, Path, Booster, "LGBMModel"]] = None
    ):
        """Docstring is set after definition, using a template."""
        params = self._process_params(stage="fit")

        # Do not modify original args in fit function
        # Refer to https://github.com/microsoft/LightGBM/pull/2619
        eval_metric_list = copy.deepcopy(eval_metric)
        if not isinstance(eval_metric_list, list):
            eval_metric_list = [eval_metric_list]

        # Separate built-in from callable evaluation metrics
        eval_metrics_callable = [_EvalFunctionWrapper(f) for f in eval_metric_list if callable(f)]
        eval_metrics_builtin = [m for m in eval_metric_list if isinstance(m, str)]

        # concatenate metric from params (or default if not provided in params) and eval_metric
        params['metric'] = [params['metric']] if isinstance(params['metric'], (str, type(None))) else params['metric']
        params['metric'] = [e for e in eval_metrics_builtin if e not in params['metric']] + params['metric']
        params['metric'] = [metric for metric in params['metric'] if metric is not None]

        if not isinstance(X, (pd_DataFrame, dt_DataTable)):
            _X, _y = _LGBMCheckXY(X, y, accept_sparse=True, force_all_finite=False, ensure_min_samples=2)
            if sample_weight is not None:
                sample_weight = _LGBMCheckSampleWeight(sample_weight, _X)
        else:
            _X, _y = X, y

        if self._class_weight is None:
            self._class_weight = self.class_weight
        if self._class_weight is not None:
            class_sample_weight = _LGBMComputeSampleWeight(self._class_weight, y)
            if sample_weight is None or len(sample_weight) == 0:
                sample_weight = class_sample_weight
            else:
                sample_weight = np.multiply(sample_weight, class_sample_weight)

        self._n_features = _X.shape[1]
        # copy for consistency
        self._n_features_in = self._n_features

        train_set = Dataset(data=_X, label=_y, weight=sample_weight, group=group,
                            init_score=init_score, categorical_feature=categorical_feature,
                            params=params)

        valid_sets = []
        if eval_set is not None:

            def _get_meta_data(collection, name, i):
                if collection is None:
                    return None
                elif isinstance(collection, list):
                    return collection[i] if len(collection) > i else None
                elif isinstance(collection, dict):
                    return collection.get(i, None)
                else:
                    raise TypeError(f"{name} should be dict or list")

Guolin Ke's avatar
Guolin Ke committed
771
772
773
            if isinstance(eval_set, tuple):
                eval_set = [eval_set]
            for i, valid_data in enumerate(eval_set):
774
                # reduce cost for prediction training data
Guolin Ke's avatar
Guolin Ke committed
775
776
777
                if valid_data[0] is X and valid_data[1] is y:
                    valid_set = train_set
                else:
                    valid_weight = _get_meta_data(eval_sample_weight, 'eval_sample_weight', i)
                    valid_class_weight = _get_meta_data(eval_class_weight, 'eval_class_weight', i)
                    if valid_class_weight is not None:
                        if isinstance(valid_class_weight, dict) and self._class_map is not None:
                            valid_class_weight = {self._class_map[k]: v for k, v in valid_class_weight.items()}
                        valid_class_sample_weight = _LGBMComputeSampleWeight(valid_class_weight, valid_data[1])
                        if valid_weight is None or len(valid_weight) == 0:
                            valid_weight = valid_class_sample_weight
                        else:
                            valid_weight = np.multiply(valid_weight, valid_class_sample_weight)
                    valid_init_score = _get_meta_data(eval_init_score, 'eval_init_score', i)
                    valid_group = _get_meta_data(eval_group, 'eval_group', i)
                    valid_set = Dataset(data=valid_data[0], label=valid_data[1], weight=valid_weight,
                                        group=valid_group, init_score=valid_init_score,
                                        categorical_feature='auto', params=params)

Guolin Ke's avatar
Guolin Ke committed
794
795
                valid_sets.append(valid_set)

796
797
798
        if isinstance(init_model, LGBMModel):
            init_model = init_model.booster_

        if callbacks is None:
            callbacks = []
        else:
            callbacks = copy.copy(callbacks)  # don't use deepcopy here to allow non-serializable objects

        evals_result: _EvalResultDict = {}
        callbacks.append(record_evaluation(evals_result))

        self._Booster = train(
            params=params,
            train_set=train_set,
            num_boost_round=self.n_estimators,
            valid_sets=valid_sets,
            valid_names=eval_names,
            feval=eval_metrics_callable,
            init_model=init_model,
            feature_name=feature_name,
            callbacks=callbacks
        )

        self._evals_result = evals_result
        self._best_iteration = self._Booster.best_iteration
        self._best_score = self._Booster.best_score

        self.fitted_ = True

        # free dataset
        self._Booster.free_dataset()
        del train_set, valid_sets
        return self

    fit.__doc__ = _lgbmmodel_doc_fit.format(
        X_shape="array-like or sparse matrix of shape = [n_samples, n_features]",
        y_shape="array-like of shape = [n_samples]",
        sample_weight_shape="array-like of shape = [n_samples] or None, optional (default=None)",
        init_score_shape="array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task) or shape = [n_samples, n_classes] (for multi-class task) or None, optional (default=None)",
        group_shape="array-like or None, optional (default=None)",
        eval_sample_weight_shape="list of array, or None, optional (default=None)",
        eval_init_score_shape="list of array, or None, optional (default=None)",
        eval_group_shape="list of array, or None, optional (default=None)"
    ) + "\n\n" + _lgbmmodel_doc_custom_eval_note

    def predict(
        self,
        X,
        raw_score: bool = False,
        start_iteration: int = 0,
        num_iteration: Optional[int] = None,
        pred_leaf: bool = False,
        pred_contrib: bool = False,
        validate_features: bool = False,
        **kwargs: Any
    ):
        """Docstring is set after definition, using a template."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("Estimator not fitted, call fit before exploiting the model.")
        if not isinstance(X, (pd_DataFrame, dt_DataTable)):
            X = _LGBMCheckArray(X, accept_sparse=True, force_all_finite=False)
        n_features = X.shape[1]
        if self._n_features != n_features:
            raise ValueError("Number of features of the model must "
                             f"match the input. Model n_features_ is {self._n_features} and "
                             f"input n_features is {n_features}")
        # retrieve original params that possibly can be used in both training and prediction
        # and then overwrite them (considering aliases) with params that were passed directly in prediction
        predict_params = self._process_params(stage="predict")
        for alias in _ConfigAliases.get_by_alias(
            "data",
            "X",
            "raw_score",
            "start_iteration",
            "num_iteration",
            "pred_leaf",
            "pred_contrib",
            *kwargs.keys()
        ):
            predict_params.pop(alias, None)
        predict_params.update(kwargs)

        # number of threads can have values with special meaning which is only applied
        # in the scikit-learn interface, these should not reach the c++ side as-is
        predict_params = _choose_param_value("num_threads", predict_params, self.n_jobs)
        predict_params["num_threads"] = self._process_n_jobs(predict_params["num_threads"])

        return self._Booster.predict(X, raw_score=raw_score, start_iteration=start_iteration, num_iteration=num_iteration,
                                     pred_leaf=pred_leaf, pred_contrib=pred_contrib, validate_features=validate_features,
                                     **predict_params)

    predict.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted value for each sample.",
        X_shape="array-like or sparse matrix of shape = [n_samples, n_features]",
        output_name="predicted_result",
        predicted_result_shape="array-like of shape = [n_samples] or shape = [n_samples, n_classes]",
        X_leaves_shape="array-like of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]",
        X_SHAP_values_shape="array-like of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes] or list with n_classes length of such objects"
    )

    @property
    def n_features_(self) -> int:
        """:obj:`int`: The number of features of fitted model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No n_features found. Need to call fit beforehand.')
        return self._n_features

    @property
    def n_features_in_(self) -> int:
        """:obj:`int`: The number of features of fitted model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No n_features_in found. Need to call fit beforehand.')
        return self._n_features_in

    @property
    def best_score_(self) -> _LGBM_BoosterBestScoreType:
        """:obj:`dict`: The best score of fitted model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No best_score found. Need to call fit beforehand.')
        return self._best_score

    @property
    def best_iteration_(self) -> int:
        """:obj:`int`: The best iteration of fitted model if ``early_stopping()`` callback has been specified."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No best_iteration found. Need to call fit with early_stopping callback beforehand.')
        return self._best_iteration

    @property
    def objective_(self) -> Union[str, _LGBM_ScikitCustomObjectiveFunction]:
        """:obj:`str` or :obj:`callable`: The concrete objective used while fitting this model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No objective found. Need to call fit beforehand.')
        return self._objective

    @property
    def n_estimators_(self) -> int:
        """:obj:`int`: True number of boosting iterations performed.

        This might be less than parameter ``n_estimators`` if early stopping was enabled or
        if boosting stopped early due to limits on complexity like ``min_gain_to_split``.
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No n_estimators found. Need to call fit beforehand.')
        return self._Booster.current_iteration()  # type: ignore

    @property
    def n_iter_(self) -> int:
        """:obj:`int`: True number of boosting iterations performed.

        This might be less than parameter ``n_estimators`` if early stopping was enabled or
        if boosting stopped early due to limits on complexity like ``min_gain_to_split``.
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No n_iter found. Need to call fit beforehand.')
        return self._Booster.current_iteration()  # type: ignore

    @property
    def booster_(self):
        """Booster: The underlying Booster of this model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No booster found. Need to call fit beforehand.')
        return self._Booster

    @property
    def evals_result_(self) -> _EvalResultDict:
        """:obj:`dict`: The evaluation results if validation sets have been specified."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No results found. Need to call fit with eval_set beforehand.')
        return self._evals_result

    @property
    def feature_importances_(self):
        """:obj:`array` of shape = [n_features]: The feature importances (the higher, the more important).

        .. note::

            ``importance_type`` attribute is passed to the function
            to configure the type of importance values to be extracted.
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No feature_importances found. Need to call fit beforehand.')
        return self._Booster.feature_importance(importance_type=self.importance_type)

    @property
    def feature_name_(self):
        """:obj:`array` of shape = [n_features]: The names of features."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No feature_name found. Need to call fit beforehand.')
        return self._Booster.feature_name()


class LGBMRegressor(_LGBMRegressorBase, LGBMModel):
    """LightGBM regressor."""

    def fit(  # type: ignore[override]
        self,
        X,
        y,
        sample_weight=None,
        init_score=None,
        eval_set=None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight=None,
        eval_init_score=None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        feature_name='auto',
        categorical_feature='auto',
        callbacks=None,
        init_model: Optional[Union[str, Path, Booster, LGBMModel]] = None
    ):
        """Docstring is inherited from the LGBMModel."""
        super().fit(
            X,
            y,
            sample_weight=sample_weight,
            init_score=init_score,
            eval_set=eval_set,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_init_score=eval_init_score,
            eval_metric=eval_metric,
            feature_name=feature_name,
            categorical_feature=categorical_feature,
            callbacks=callbacks,
            init_model=init_model
        )
        return self

    _base_doc = LGBMModel.fit.__doc__.replace("self : LGBMModel", "self : LGBMRegressor")  # type: ignore
    _base_doc = (_base_doc[:_base_doc.find('group :')]  # type: ignore
                 + _base_doc[_base_doc.find('eval_set :'):])  # type: ignore
    _base_doc = (_base_doc[:_base_doc.find('eval_class_weight :')]
                 + _base_doc[_base_doc.find('eval_init_score :'):])
    fit.__doc__ = (_base_doc[:_base_doc.find('eval_group :')]
                   + _base_doc[_base_doc.find('eval_metric :'):])


class LGBMClassifier(_LGBMClassifierBase, LGBMModel):
    """LightGBM classifier."""

    def fit(  # type: ignore[override]
        self,
        X,
        y,
        sample_weight=None,
        init_score=None,
        eval_set=None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight=None,
        eval_class_weight=None,
        eval_init_score=None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        feature_name='auto',
        categorical_feature='auto',
        callbacks=None,
        init_model: Optional[Union[str, Path, Booster, LGBMModel]] = None
    ):
        """Docstring is inherited from the LGBMModel."""
        _LGBMAssertAllFinite(y)
        _LGBMCheckClassificationTargets(y)
        self._le = _LGBMLabelEncoder().fit(y)
        _y = self._le.transform(y)
        self._class_map = dict(zip(self._le.classes_, self._le.transform(self._le.classes_)))
        if isinstance(self.class_weight, dict):
            self._class_weight = {self._class_map[k]: v for k, v in self.class_weight.items()}

        self._classes = self._le.classes_
        self._n_classes = len(self._classes)

        # adjust eval metrics to match whether binary or multiclass
        # classification is being performed
        if not callable(eval_metric):
            if isinstance(eval_metric, list):
                eval_metric_list = eval_metric
            elif isinstance(eval_metric, str):
                eval_metric_list = [eval_metric]
            else:
                eval_metric_list = []
            if self._n_classes > 2:
                for index, metric in enumerate(eval_metric_list):
                    if metric in {'logloss', 'binary_logloss'}:
                        eval_metric_list[index] = "multi_logloss"
                    elif metric in {'error', 'binary_error'}:
                        eval_metric_list[index] = "multi_error"
            else:
                for index, metric in enumerate(eval_metric_list):
                    if metric in {'logloss', 'multi_logloss'}:
                        eval_metric_list[index] = 'binary_logloss'
                    elif metric in {'error', 'multi_error'}:
                        eval_metric_list[index] = 'binary_error'
            eval_metric = eval_metric_list

1089
1090
        # do not modify args, as it causes errors in model selection tools
        valid_sets = None
        if eval_set is not None:
            if isinstance(eval_set, tuple):
                eval_set = [eval_set]
            valid_sets = [None] * len(eval_set)
            for i, (valid_x, valid_y) in enumerate(eval_set):
                if valid_x is X and valid_y is y:
                    valid_sets[i] = (valid_x, _y)
                else:
                    valid_sets[i] = (valid_x, self._le.transform(valid_y))

        super().fit(
            X,
            _y,
            sample_weight=sample_weight,
            init_score=init_score,
            eval_set=valid_sets,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_class_weight=eval_class_weight,
            eval_init_score=eval_init_score,
            eval_metric=eval_metric,
            feature_name=feature_name,
            categorical_feature=categorical_feature,
            callbacks=callbacks,
            init_model=init_model
        )
        return self

    _base_doc = LGBMModel.fit.__doc__.replace("self : LGBMModel", "self : LGBMClassifier")  # type: ignore
    _base_doc = (_base_doc[:_base_doc.find('group :')]  # type: ignore
                 + _base_doc[_base_doc.find('eval_set :'):])  # type: ignore
    fit.__doc__ = (_base_doc[:_base_doc.find('eval_group :')]
                   + _base_doc[_base_doc.find('eval_metric :'):])
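
    # Minimal usage sketch (illustrative data and parameter values only;
    # X_train, y_train, X_valid, y_valid are assumed to exist):
    #
    #     clf = LGBMClassifier(n_estimators=10)
    #     clf.fit(X_train, y_train,
    #             eval_set=[(X_valid, y_valid)],
    #             eval_metric='logloss')  # remapped per task, see fit()
    #     labels = clf.predict(X_valid)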

    def predict(
        self,
        X,
        raw_score: bool = False,
        start_iteration: int = 0,
        num_iteration: Optional[int] = None,
        pred_leaf: bool = False,
        pred_contrib: bool = False,
        validate_features: bool = False,
        **kwargs: Any
    ):
        """Docstring is inherited from the LGBMModel."""
        result = self.predict_proba(X, raw_score, start_iteration, num_iteration,
                                    pred_leaf, pred_contrib, validate_features,
                                    **kwargs)
        if callable(self._objective) or raw_score or pred_leaf or pred_contrib:
            return result
        else:
            class_index = np.argmax(result, axis=1)
            return self._le.inverse_transform(class_index)

    predict.__doc__ = LGBMModel.predict.__doc__
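
    # For example (illustrative): with classes_ == ['cat', 'dog'],
    # ``predict`` returns original labels such as ['dog', 'cat'], while
    # ``predict_proba`` returns per-class probabilities such as
    # [[0.2, 0.8], [0.7, 0.3]].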

    def predict_proba(
        self,
        X,
        raw_score: bool = False,
        start_iteration: int = 0,
        num_iteration: Optional[int] = None,
        pred_leaf: bool = False,
        pred_contrib: bool = False,
        validate_features: bool = False,
        **kwargs: Any
    ):
        """Docstring is set after definition, using a template."""
        result = super().predict(X, raw_score, start_iteration, num_iteration, pred_leaf, pred_contrib, validate_features, **kwargs)
        if callable(self._objective) and not (raw_score or pred_leaf or pred_contrib):
            _log_warning("Cannot compute class probabilities or labels "
                         "due to the usage of customized objective function.\n"
                         "Returning raw scores instead.")
            return result
        elif self._n_classes > 2 or raw_score or pred_leaf or pred_contrib:
            return result
        else:
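            # binary task: the Booster returns only the probability of the
            # positive class, so stack [1 - p, p] column-wise to recover the
            # [n_samples, 2] layout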
            return np.vstack((1. - result, result)).transpose()

    predict_proba.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted probability for each class for each sample.",
        X_shape="array-like or sparse matrix of shape = [n_samples, n_features]",
        output_name="predicted_probability",
        predicted_result_shape="array-like of shape = [n_samples] or shape = [n_samples, n_classes]",
        X_leaves_shape="array-like of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]",
        X_SHAP_values_shape="array-like of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes] or list with n_classes length of such objects"
    )

    @property
    def classes_(self):
        """:obj:`array` of shape = [n_classes]: The class label array."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No classes found. Need to call fit beforehand.')
        return self._classes

    @property
    def n_classes_(self) -> int:
        """:obj:`int`: The number of classes."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No classes found. Need to call fit beforehand.')
        return self._n_classes


class LGBMRanker(LGBMModel):
    """LightGBM ranker.

    .. warning::

        scikit-learn doesn't support ranking applications yet,
        therefore this class is not really compatible with the sklearn ecosystem.
        Please use this class mainly for training and applying ranking models in common sklearnish way.
    """

    def fit(  # type: ignore[override]
        self,
        X,
        y,
        sample_weight=None,
        init_score=None,
        group=None,
        eval_set=None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight=None,
        eval_init_score=None,
        eval_group=None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        eval_at: Union[List[int], Tuple[int, ...]] = (1, 2, 3, 4, 5),
        feature_name='auto',
        categorical_feature='auto',
        callbacks=None,
        init_model: Optional[Union[str, Path, Booster, LGBMModel]] = None
    ):
        """Docstring is inherited from the LGBMModel."""
        # check group data
        if group is None:
            raise ValueError("Should set group for ranking task")

        if eval_set is not None:
            if eval_group is None:
                raise ValueError("Eval_group cannot be None when eval_set is not None")
            elif len(eval_group) != len(eval_set):
                raise ValueError("Length of eval_group should be equal to eval_set")
            elif (isinstance(eval_group, dict)
                  and any(i not in eval_group or eval_group[i] is None for i in range(len(eval_group)))
                  or isinstance(eval_group, list)
                  and any(group is None for group in eval_group)):
                raise ValueError("Should set group for all eval datasets for ranking task; "
                                 "if you use dict, the index should start from 0")

        self._eval_at = eval_at
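        # e.g. eval_group=[[10, 20]] describes one eval_set whose 30 rows
        # form two queries of 10 and 20 documents (illustrative sizes)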
        super().fit(
            X,
            y,
            sample_weight=sample_weight,
            init_score=init_score,
            group=group,
            eval_set=eval_set,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_init_score=eval_init_score,
            eval_group=eval_group,
            eval_metric=eval_metric,
            feature_name=feature_name,
            categorical_feature=categorical_feature,
            callbacks=callbacks,
            init_model=init_model
        )
        return self

    _base_doc = LGBMModel.fit.__doc__.replace("self : LGBMModel", "self : LGBMRanker")  # type: ignore
    fit.__doc__ = (_base_doc[:_base_doc.find('eval_class_weight :')]  # type: ignore
                   + _base_doc[_base_doc.find('eval_init_score :'):])  # type: ignore
    _base_doc = fit.__doc__
    _before_feature_name, _feature_name, _after_feature_name = _base_doc.partition('feature_name :')
    fit.__doc__ = f"""{_before_feature_name}eval_at : list or tuple of int, optional (default=(1, 2, 3, 4, 5))
        The evaluation positions of the specified metric.
    {_feature_name}{_after_feature_name}"""
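
    # Minimal usage sketch (illustrative data; group sizes must sum to the
    # number of rows in the corresponding dataset; all names below are
    # assumptions):
    #
    #     rnk = LGBMRanker(n_estimators=5)
    #     rnk.fit(X_train, y_relevance, group=[10, 20],  # two queries
    #             eval_set=[(X_valid, y_valid)], eval_group=[[15]],
    #             eval_at=(1, 3))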