# coding: utf-8
"""Scikit-learn wrapper interface for LightGBM."""
import copy
from inspect import signature
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import numpy as np

from .basic import Booster, Dataset, LightGBMError, _choose_param_value, _ConfigAliases, _log_warning
from .callback import record_evaluation
from .compat import (SKLEARN_INSTALLED, LGBMNotFittedError, _LGBMAssertAllFinite, _LGBMCheckArray,
                     _LGBMCheckClassificationTargets, _LGBMCheckSampleWeight, _LGBMCheckXY, _LGBMClassifierBase,
                     _LGBMComputeSampleWeight, _LGBMCpuCount, _LGBMLabelEncoder, _LGBMModelBase, _LGBMRegressorBase,
                     dt_DataTable, pd_DataFrame)
from .engine import train

_EvalResultType = Tuple[str, float, bool]

_LGBM_ScikitCustomObjectiveFunction = Union[
    Callable[
        [np.ndarray, np.ndarray],
        Tuple[np.ndarray, np.ndarray]
    ],
    Callable[
        [np.ndarray, np.ndarray, np.ndarray],
        Tuple[np.ndarray, np.ndarray]
    ],
]
_LGBM_ScikitCustomEvalFunction = Union[
    Callable[
        [np.ndarray, np.ndarray],
        Union[_EvalResultType, List[_EvalResultType]]
    ],
    Callable[
        [np.ndarray, np.ndarray, np.ndarray],
        Union[_EvalResultType, List[_EvalResultType]]
    ],
    Callable[
        [np.ndarray, np.ndarray, np.ndarray, np.ndarray],
        Union[_EvalResultType, List[_EvalResultType]]
    ],
]


class _ObjectiveFunctionWrapper:
    """Proxy class for objective function."""

    def __init__(self, func: _LGBM_ScikitCustomObjectiveFunction):
        """Construct a proxy class.

        This class transforms an objective function into one with the signature ``new_func(preds, dataset)``,
        as expected by ``lightgbm.engine.train``.

        Parameters
        ----------
        func : callable
            Expects a callable with signature ``func(y_true, y_pred)`` or ``func(y_true, y_pred, group)``
            and returns (grad, hess):

                y_true : numpy 1-D array of shape = [n_samples]
                    The target values.
                y_pred : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                    The predicted values.
                    Predicted values are returned before any transformation,
                    e.g. they are raw margin instead of probability of positive class for binary task.
                group : numpy 1-D array
                    Group/query data.
                    Only used in the learning-to-rank task.
                    sum(group) = n_samples.
                    For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
                    where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
                grad : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                    The value of the first order derivative (gradient) of the loss
                    with respect to the elements of y_pred for each sample point.
                hess : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                    The value of the second order derivative (Hessian) of the loss
                    with respect to the elements of y_pred for each sample point.

        .. note::

            For multi-class task, y_pred is a numpy 2-D array of shape = [n_samples, n_classes],
            and grad and hess should be returned in the same format.
        """
        self.func = func

    def __call__(self, preds: np.ndarray, dataset: Dataset) -> Tuple[np.ndarray, np.ndarray]:
        """Call passed function with appropriate arguments.

        Parameters
        ----------
        preds : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The predicted values.
        dataset : Dataset
            The training dataset.

        Returns
        -------
        grad : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The value of the first order derivative (gradient) of the loss
            with respect to the elements of preds for each sample point.
        hess : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The value of the second order derivative (Hessian) of the loss
            with respect to the elements of preds for each sample point.
        """
        labels = dataset.get_label()
        argc = len(signature(self.func).parameters)
        if argc == 2:
            grad, hess = self.func(labels, preds)
        elif argc == 3:
            grad, hess = self.func(labels, preds, dataset.get_group())
        else:
            raise TypeError(f"Self-defined objective function should have 2 or 3 arguments, got {argc}")
        # weight the gradient and Hessian by per-sample weights, if provided
        weight = dataset.get_weight()
        if weight is not None:
            if grad.ndim == 2:  # multi-class
                num_data = grad.shape[0]
                if weight.size != num_data:
                    raise ValueError("grad and hess should be of shape [n_samples, n_classes]")
                weight = weight.reshape(num_data, 1)
            grad *= weight
            hess *= weight
        return grad, hess

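# Example (illustrative, not part of the library): a custom squared-error
# objective in the shape ``_ObjectiveFunctionWrapper`` expects. A minimal
# sketch; in practice the built-in ``regression`` objective covers this case.
#
#     def l2_objective(y_true, y_pred):
#         grad = y_pred - y_true       # first derivative of 0.5 * (y_pred - y_true) ** 2
#         hess = np.ones_like(y_pred)  # second derivative is constant
#         return grad, hess
#
# ``lightgbm.engine.train`` would then call ``_ObjectiveFunctionWrapper(l2_objective)``
# with ``(preds, dataset)`` and receive the weighted ``(grad, hess)`` pair back.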

class _EvalFunctionWrapper:
    """Proxy class for evaluation function."""

    def __init__(self, func: _LGBM_ScikitCustomEvalFunction):
        """Construct a proxy class.

        This class transforms an evaluation function into one with the signature ``new_func(preds, dataset)``,
        as expected by ``lightgbm.engine.train``.

        Parameters
        ----------
        func : callable
            Expects a callable with one of the following signatures:
            ``func(y_true, y_pred)``,
            ``func(y_true, y_pred, weight)``
            or ``func(y_true, y_pred, weight, group)``
            and returns (eval_name, eval_result, is_higher_better) or
            list of (eval_name, eval_result, is_higher_better):

                y_true : numpy 1-D array of shape = [n_samples]
                    The target values.
                y_pred : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                    The predicted values.
                    In case of custom ``objective``, predicted values are returned before any transformation,
                    e.g. they are raw margin instead of probability of positive class for binary task in this case.
                weight : numpy 1-D array of shape = [n_samples]
                    The weight of samples. Weights should be non-negative.
                group : numpy 1-D array
                    Group/query data.
                    Only used in the learning-to-rank task.
                    sum(group) = n_samples.
                    For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
                    where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
                eval_name : str
                    The name of evaluation function (without whitespace).
                eval_result : float
                    The eval result.
                is_higher_better : bool
                    Is eval result higher better, e.g. AUC is ``is_higher_better``.
        """
        self.func = func

    def __call__(self, preds: np.ndarray, dataset: Dataset) -> Union[_EvalResultType, List[_EvalResultType]]:
        """Call passed function with appropriate arguments.

        Parameters
        ----------
        preds : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The predicted values.
        dataset : Dataset
            The training dataset.

        Returns
        -------
        eval_name : str
            The name of evaluation function (without whitespace).
        eval_result : float
            The eval result.
        is_higher_better : bool
            Is eval result higher better, e.g. AUC is ``is_higher_better``.
        """
        labels = dataset.get_label()
        argc = len(signature(self.func).parameters)
        if argc == 2:
            return self.func(labels, preds)
        elif argc == 3:
            return self.func(labels, preds, dataset.get_weight())
        elif argc == 4:
            return self.func(labels, preds, dataset.get_weight(), dataset.get_group())
        else:
            raise TypeError(f"Self-defined eval function should have 2, 3 or 4 arguments, got {argc}")

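# Example (illustrative, not part of the library): a custom RMSE metric in the
# shape ``_EvalFunctionWrapper`` expects; the built-in ``rmse`` metric makes
# this redundant in real use.
#
#     def rmse_metric(y_true, y_pred):
#         rmse = float(np.sqrt(np.mean((y_pred - y_true) ** 2)))
#         return 'custom_rmse', rmse, False  # lower is better
#
# Wrapped as ``_EvalFunctionWrapper(rmse_metric)``, it is called with
# ``(preds, dataset)`` and dispatched on its argument count, as above.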

# documentation templates for LGBMModel methods are shared between the classes in
# this module and those in the ``dask`` module

_lgbmmodel_doc_fit = (
    """
    Build a gradient boosting model from the training set (X, y).

    Parameters
    ----------
    X : {X_shape}
        Input feature matrix.
    y : {y_shape}
        The target values (class labels in classification, real numbers in regression).
    sample_weight : {sample_weight_shape}
        Weights of training data. Weights should be non-negative.
    init_score : {init_score_shape}
        Init score of training data.
    group : {group_shape}
        Group/query data.
        Only used in the learning-to-rank task.
        sum(group) = n_samples.
        For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
        where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
    eval_set : list or None, optional (default=None)
        A list of (X, y) tuple pairs to use as validation sets.
    eval_names : list of str, or None, optional (default=None)
        Names of eval_set.
    eval_sample_weight : {eval_sample_weight_shape}
        Weights of eval data. Weights should be non-negative.
    eval_class_weight : list or None, optional (default=None)
        Class weights of eval data.
    eval_init_score : {eval_init_score_shape}
        Init score of eval data.
    eval_group : {eval_group_shape}
        Group data of eval data.
    eval_metric : str, callable, list or None, optional (default=None)
        If str, it should be a built-in evaluation metric to use.
        If callable, it should be a custom evaluation metric, see note below for more details.
        If list, it can be a list of built-in metrics, a list of custom evaluation metrics, or a mix of both.
        In either case, the ``metric`` from the model parameters will be evaluated and used as well.
        Default: 'l2' for LGBMRegressor, 'logloss' for LGBMClassifier, 'ndcg' for LGBMRanker.
    feature_name : list of str, or 'auto', optional (default='auto')
        Feature names.
        If 'auto' and data is pandas DataFrame, data column names are used.
    categorical_feature : list of str or int, or 'auto', optional (default='auto')
        Categorical features.
        If list of int, interpreted as indices.
        If list of str, interpreted as feature names (need to specify ``feature_name`` as well).
        If 'auto' and data is pandas DataFrame, pandas unordered categorical columns are used.
        All values in categorical features will be cast to int32 and thus should be less than int32 max value (2147483647).
        Large values could be memory consuming. Consider using consecutive integers starting from zero.
        All negative values in categorical features will be treated as missing values.
        The output cannot be monotonically constrained with respect to a categorical feature.
        Floating point numbers in categorical features will be rounded towards 0.
    callbacks : list of callable, or None, optional (default=None)
        List of callback functions that are applied at each iteration.
        See Callbacks in Python API for more information.
    init_model : str, pathlib.Path, Booster, LGBMModel or None, optional (default=None)
        Filename of LightGBM model, Booster instance or LGBMModel instance used to continue training.

    Returns
    -------
    self : LGBMModel
        Returns self.
    """
)

_lgbmmodel_doc_custom_eval_note = """
    Note
    ----
    Custom eval function expects a callable with one of the following signatures:
    ``func(y_true, y_pred)``, ``func(y_true, y_pred, weight)`` or
    ``func(y_true, y_pred, weight, group)``
    and returns (eval_name, eval_result, is_higher_better) or
    list of (eval_name, eval_result, is_higher_better):

        y_true : numpy 1-D array of shape = [n_samples]
            The target values.
        y_pred : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The predicted values.
            In case of custom ``objective``, predicted values are returned before any transformation,
            e.g. they are raw margin instead of probability of positive class for binary task in this case.
        weight : numpy 1-D array of shape = [n_samples]
            The weight of samples. Weights should be non-negative.
        group : numpy 1-D array
            Group/query data.
            Only used in the learning-to-rank task.
            sum(group) = n_samples.
            For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
            where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
        eval_name : str
            The name of evaluation function (without whitespace).
        eval_result : float
            The eval result.
        is_higher_better : bool
            Is eval result higher better, e.g. AUC is ``is_higher_better``.
"""

_lgbmmodel_doc_predict = (
    """
    {description}

    Parameters
    ----------
    X : {X_shape}
        Input feature matrix.
    raw_score : bool, optional (default=False)
        Whether to predict raw scores.
    start_iteration : int, optional (default=0)
        Start index of the iteration to predict.
        If <= 0, starts from the first iteration.
    num_iteration : int or None, optional (default=None)
        Total number of iterations used in the prediction.
        If None, if the best iteration exists and start_iteration <= 0, the best iteration is used;
        otherwise, all iterations from ``start_iteration`` are used (no limits).
        If <= 0, all iterations from ``start_iteration`` are used (no limits).
    pred_leaf : bool, optional (default=False)
        Whether to predict leaf index.
    pred_contrib : bool, optional (default=False)
        Whether to predict feature contributions.

        .. note::

            If you want to get more explanations for your model's predictions using SHAP values,
            like SHAP interaction values,
            you can install the shap package (https://github.com/slundberg/shap).
            Note that unlike the shap package, with ``pred_contrib`` we return a matrix with an extra
            column, where the last column is the expected value.

    **kwargs
        Other parameters for the prediction.

    Returns
    -------
    {output_name} : {predicted_result_shape}
        The predicted values.
    X_leaves : {X_leaves_shape}
        If ``pred_leaf=True``, the predicted leaf of every tree for each sample.
    X_SHAP_values : {X_SHAP_values_shape}
        If ``pred_contrib=True``, the feature contributions for each sample.
    """
)


class LGBMModel(_LGBMModelBase):
    """Implementation of the scikit-learn API for LightGBM."""

    def __init__(
        self,
        boosting_type: str = 'gbdt',
        num_leaves: int = 31,
        max_depth: int = -1,
        learning_rate: float = 0.1,
        n_estimators: int = 100,
        subsample_for_bin: int = 200000,
        objective: Optional[Union[str, _LGBM_ScikitCustomObjectiveFunction]] = None,
        class_weight: Optional[Union[Dict, str]] = None,
        min_split_gain: float = 0.,
        min_child_weight: float = 1e-3,
        min_child_samples: int = 20,
        subsample: float = 1.,
        subsample_freq: int = 0,
        colsample_bytree: float = 1.,
        reg_alpha: float = 0.,
        reg_lambda: float = 0.,
        random_state: Optional[Union[int, np.random.RandomState]] = None,
        n_jobs: Optional[int] = None,
        importance_type: str = 'split',
        **kwargs
    ):
        r"""Construct a gradient boosting model.

        Parameters
        ----------
        boosting_type : str, optional (default='gbdt')
            'gbdt', traditional Gradient Boosting Decision Tree.
            'dart', Dropouts meet Multiple Additive Regression Trees.
            'goss', Gradient-based One-Side Sampling.
            'rf', Random Forest.
        num_leaves : int, optional (default=31)
            Maximum tree leaves for base learners.
        max_depth : int, optional (default=-1)
            Maximum tree depth for base learners, <=0 means no limit.
        learning_rate : float, optional (default=0.1)
            Boosting learning rate.
            You can use ``callbacks`` parameter of ``fit`` method to shrink/adapt learning rate
            in training using ``reset_parameter`` callback.
            Note, that this will ignore the ``learning_rate`` argument in training.
        n_estimators : int, optional (default=100)
            Number of boosted trees to fit.
        subsample_for_bin : int, optional (default=200000)
            Number of samples for constructing bins.
        objective : str, callable or None, optional (default=None)
            Specify the learning task and the corresponding learning objective or
            a custom objective function to be used (see note below).
            Default: 'regression' for LGBMRegressor, 'binary' or 'multiclass' for LGBMClassifier, 'lambdarank' for LGBMRanker.
        class_weight : dict, 'balanced' or None, optional (default=None)
            Weights associated with classes in the form ``{class_label: weight}``.
            Use this parameter only for multi-class classification task;
            for binary classification task you may use ``is_unbalance`` or ``scale_pos_weight`` parameters.
            Note, that the usage of all these parameters will result in poor estimates of the individual class probabilities.
            You may want to consider performing probability calibration
            (https://scikit-learn.org/stable/modules/calibration.html) of your model.
            The 'balanced' mode uses the values of y to automatically adjust weights
            inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))``.
            If None, all classes are supposed to have weight one.
            Note, that these weights will be multiplied with ``sample_weight`` (passed through the ``fit`` method)
            if ``sample_weight`` is specified.
        min_split_gain : float, optional (default=0.)
            Minimum loss reduction required to make a further partition on a leaf node of the tree.
        min_child_weight : float, optional (default=1e-3)
            Minimum sum of instance weight (Hessian) needed in a child (leaf).
        min_child_samples : int, optional (default=20)
            Minimum number of data needed in a child (leaf).
        subsample : float, optional (default=1.)
            Subsample ratio of the training instance.
        subsample_freq : int, optional (default=0)
            Frequency of subsampling; <=0 means subsampling is disabled.
        colsample_bytree : float, optional (default=1.)
            Subsample ratio of columns when constructing each tree.
        reg_alpha : float, optional (default=0.)
            L1 regularization term on weights.
        reg_lambda : float, optional (default=0.)
            L2 regularization term on weights.
        random_state : int, RandomState object or None, optional (default=None)
            Random number seed.
            If int, this number is used to seed the C++ code.
            If RandomState object (numpy), a random integer is picked based on its state to seed the C++ code.
            If None, default seeds in C++ code are used.
        n_jobs : int or None, optional (default=None)
            Number of parallel threads to use for training (can be changed at prediction time by
            passing it as an extra keyword argument).

            For better performance, it is recommended to set this to the number of physical cores
            in the CPU.

            Negative integers are interpreted as following joblib's formula (n_cpus + 1 + n_jobs), just like
            scikit-learn (so e.g. -1 means using all threads). A value of zero corresponds to the default number of
            threads configured for OpenMP in the system. A value of ``None`` (the default) corresponds
            to using the number of physical cores in the system (its correct detection requires
            either the ``joblib`` or the ``psutil`` util libraries to be installed).
        importance_type : str, optional (default='split')
            The type of feature importance to be filled into ``feature_importances_``.
            If 'split', result contains numbers of times the feature is used in a model.
            If 'gain', result contains total gains of splits which use the feature.
        **kwargs
            Other parameters for the model.
            Check http://lightgbm.readthedocs.io/en/latest/Parameters.html for more parameters.

            .. warning::

                \*\*kwargs is not supported in sklearn, it may cause unexpected issues.

        Note
        ----
        A custom objective function can be provided for the ``objective`` parameter.
        In this case, it should have the signature
        ``objective(y_true, y_pred) -> grad, hess`` or
        ``objective(y_true, y_pred, group) -> grad, hess``:

            y_true : numpy 1-D array of shape = [n_samples]
                The target values.
            y_pred : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                The predicted values.
                Predicted values are returned before any transformation,
                e.g. they are raw margin instead of probability of positive class for binary task.
            group : numpy 1-D array
                Group/query data.
                Only used in the learning-to-rank task.
                sum(group) = n_samples.
                For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
                where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
            grad : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                The value of the first order derivative (gradient) of the loss
                with respect to the elements of y_pred for each sample point.
            hess : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                The value of the second order derivative (Hessian) of the loss
                with respect to the elements of y_pred for each sample point.

        For multi-class task, y_pred is a numpy 2-D array of shape = [n_samples, n_classes],
        and grad and hess should be returned in the same format.
        """
        if not SKLEARN_INSTALLED:
            raise LightGBMError('scikit-learn is required for lightgbm.sklearn. '
                                'You must install scikit-learn and restart your session to use this module.')

        self.boosting_type = boosting_type
        self.objective = objective
        self.num_leaves = num_leaves
        self.max_depth = max_depth
        self.learning_rate = learning_rate
        self.n_estimators = n_estimators
        self.subsample_for_bin = subsample_for_bin
        self.min_split_gain = min_split_gain
        self.min_child_weight = min_child_weight
        self.min_child_samples = min_child_samples
        self.subsample = subsample
        self.subsample_freq = subsample_freq
        self.colsample_bytree = colsample_bytree
        self.reg_alpha = reg_alpha
        self.reg_lambda = reg_lambda
        self.random_state = random_state
        self.n_jobs = n_jobs
        self.importance_type = importance_type
        self._Booster: Optional[Booster] = None
        self._evals_result = None
        self._best_score = None
        self._best_iteration = None
        self._other_params: Dict[str, Any] = {}
        self._objective = objective
        self.class_weight = class_weight
        self._class_weight = None
        self._class_map = None
        self._n_features = None
        self._n_features_in = None
        self._classes = None
        self._n_classes = None
        self.set_params(**kwargs)

    def _more_tags(self) -> Dict[str, Any]:
        return {
            'allow_nan': True,
            'X_types': ['2darray', 'sparse', '1dlabels'],
            '_xfail_checks': {
                'check_no_attributes_set_in_init':
                'scikit-learn incorrectly asserts that private attributes '
                'cannot be set in __init__: '
                '(see https://github.com/microsoft/LightGBM/issues/2628)'
            }
        }

    def __sklearn_is_fitted__(self) -> bool:
        return getattr(self, "fitted_", False)

    def get_params(self, deep: bool = True) -> Dict[str, Any]:
        """Get parameters for this estimator.

        Parameters
        ----------
        deep : bool, optional (default=True)
            If True, will return the parameters for this estimator and
            contained subobjects that are estimators.

        Returns
        -------
        params : dict
            Parameter names mapped to their values.
        """
        params = super().get_params(deep=deep)
        params.update(self._other_params)
        return params

    def set_params(self, **params: Any) -> "LGBMModel":
        """Set the parameters of this estimator.

        Parameters
        ----------
        **params
            Parameter names with their new values.

        Returns
        -------
        self : object
            Returns self.
        """
        for key, value in params.items():
            setattr(self, key, value)
            if hasattr(self, f"_{key}"):
                setattr(self, f"_{key}", value)
            self._other_params[key] = value
        return self

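    # Example (illustrative): parameters set through ``set_params`` are mirrored
    # into ``_other_params``, so ``get_params`` round-trips values that are not
    # explicit constructor arguments.
    #
    #     model = LGBMModel()
    #     model.set_params(learning_rate=0.05, bagging_freq=5)
    #     assert model.get_params()['bagging_freq'] == 5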
    def _process_params(self, stage: str) -> Dict[str, Any]:
        """Process the parameters of this estimator based on its type, parameter aliases, etc.

        Parameters
        ----------
        stage : str
            Name of the stage (can be ``fit`` or ``predict``) this method is called from.

        Returns
        -------
        processed_params : dict
            Processed parameter names mapped to their values.
        """
        assert stage in {"fit", "predict"}
        params = self.get_params()

        params.pop('objective', None)
        for alias in _ConfigAliases.get('objective'):
            if alias in params:
                obj = params.pop(alias)
                _log_warning(f"Found '{alias}' in params. Will use it instead of 'objective' argument")
                if stage == "fit":
                    self._objective = obj
        if stage == "fit":
            if self._objective is None:
                if isinstance(self, LGBMRegressor):
                    self._objective = "regression"
                elif isinstance(self, LGBMClassifier):
                    if self._n_classes > 2:
                        self._objective = "multiclass"
                    else:
                        self._objective = "binary"
                elif isinstance(self, LGBMRanker):
                    self._objective = "lambdarank"
                else:
                    raise ValueError("Unknown LGBMModel type.")
        if callable(self._objective):
            if stage == "fit":
                params['objective'] = _ObjectiveFunctionWrapper(self._objective)
            else:
                params['objective'] = 'None'
        else:
            params['objective'] = self._objective

        params.pop('importance_type', None)
        params.pop('n_estimators', None)
        params.pop('class_weight', None)

        if isinstance(params['random_state'], np.random.RandomState):
            params['random_state'] = params['random_state'].randint(np.iinfo(np.int32).max)
        if self._n_classes is not None and self._n_classes > 2:
            for alias in _ConfigAliases.get('num_class'):
                params.pop(alias, None)
            params['num_class'] = self._n_classes
        if hasattr(self, '_eval_at'):
            eval_at = self._eval_at
            for alias in _ConfigAliases.get('eval_at'):
                if alias in params:
                    _log_warning(f"Found '{alias}' in params. Will use it instead of 'eval_at' argument")
                    eval_at = params.pop(alias)
            params['eval_at'] = eval_at

        # register default metric for consistency with callable eval_metric case
        original_metric = self._objective if isinstance(self._objective, str) else None
        if original_metric is None:
            # try to deduce from class instance
            if isinstance(self, LGBMRegressor):
                original_metric = "l2"
            elif isinstance(self, LGBMClassifier):
                original_metric = "multi_logloss" if self._n_classes > 2 else "binary_logloss"
            elif isinstance(self, LGBMRanker):
                original_metric = "ndcg"

        # overwrite default metric by explicitly set metric
        params = _choose_param_value("metric", params, original_metric)

        # use joblib conventions for negative n_jobs, just like scikit-learn
        # at predict time, this is handled later due to the order of parameter updates
        if stage == "fit":
            params = _choose_param_value("num_threads", params, self.n_jobs)
            params["num_threads"] = self._process_n_jobs(params["num_threads"])

        return params

    def _process_n_jobs(self, n_jobs: Optional[int]) -> int:
        """Convert special values of n_jobs to their actual values according to the formulas that apply.

        Parameters
        ----------
        n_jobs : int or None
            The original value of n_jobs, potentially having special values such as 'None' or
            negative integers.

        Returns
        -------
        n_jobs : int
            The value of n_jobs with special values converted to actual number of threads.
        """
        if n_jobs is None:
            n_jobs = _LGBMCpuCount(only_physical_cores=True)
        elif n_jobs < 0:
            n_jobs = max(_LGBMCpuCount(only_physical_cores=False) + 1 + n_jobs, 1)
        return n_jobs

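    # Example (illustrative): on a machine with 4 physical / 8 logical cores,
    # ``_process_n_jobs(None)`` returns 4 (physical cores), ``_process_n_jobs(-1)``
    # returns max(8 + 1 - 1, 1) = 8, and ``_process_n_jobs(-2)`` returns 7,
    # following joblib's ``n_cpus + 1 + n_jobs`` convention.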
    def fit(
        self,
        X,
        y,
        sample_weight=None,
        init_score=None,
        group=None,
        eval_set=None,
        eval_names=None,
        eval_sample_weight=None,
        eval_class_weight=None,
        eval_init_score=None,
        eval_group=None,
        eval_metric=None,
        feature_name='auto',
        categorical_feature='auto',
        callbacks=None,
        init_model=None
    ):
        """Docstring is set after definition, using a template."""
        params = self._process_params(stage="fit")

        # Do not modify original args in fit function
        # Refer to https://github.com/microsoft/LightGBM/pull/2619
        eval_metric_list = copy.deepcopy(eval_metric)
        if not isinstance(eval_metric_list, list):
            eval_metric_list = [eval_metric_list]

        # Separate built-in from callable evaluation metrics
        eval_metrics_callable = [_EvalFunctionWrapper(f) for f in eval_metric_list if callable(f)]
        eval_metrics_builtin = [m for m in eval_metric_list if isinstance(m, str)]

        # concatenate metric from params (or default if not provided in params) and eval_metric
        params['metric'] = [params['metric']] if isinstance(params['metric'], (str, type(None))) else params['metric']
        params['metric'] = [e for e in eval_metrics_builtin if e not in params['metric']] + params['metric']
        params['metric'] = [metric for metric in params['metric'] if metric is not None]

        if not isinstance(X, (pd_DataFrame, dt_DataTable)):
            _X, _y = _LGBMCheckXY(X, y, accept_sparse=True, force_all_finite=False, ensure_min_samples=2)
            if sample_weight is not None:
                sample_weight = _LGBMCheckSampleWeight(sample_weight, _X)
        else:
            _X, _y = X, y

        if self._class_weight is None:
            self._class_weight = self.class_weight
        if self._class_weight is not None:
            class_sample_weight = _LGBMComputeSampleWeight(self._class_weight, y)
            if sample_weight is None or len(sample_weight) == 0:
                sample_weight = class_sample_weight
            else:
                sample_weight = np.multiply(sample_weight, class_sample_weight)

        self._n_features = _X.shape[1]
        # copy for consistency
        self._n_features_in = self._n_features

        def _construct_dataset(X, y, sample_weight, init_score, group, params,
                               categorical_feature='auto'):
            return Dataset(X, label=y, weight=sample_weight, group=group,
                           init_score=init_score, params=params,
                           categorical_feature=categorical_feature)

        train_set = _construct_dataset(_X, _y, sample_weight, init_score, group, params,
                                       categorical_feature=categorical_feature)

        valid_sets = []
        if eval_set is not None:

            def _get_meta_data(collection, name, i):
                if collection is None:
                    return None
                elif isinstance(collection, list):
                    return collection[i] if len(collection) > i else None
                elif isinstance(collection, dict):
                    return collection.get(i, None)
                else:
                    raise TypeError(f"{name} should be dict or list")

            if isinstance(eval_set, tuple):
                eval_set = [eval_set]
            for i, valid_data in enumerate(eval_set):
                # reuse the training Dataset when the validation data is the training data,
                # to avoid the cost of constructing it twice
                if valid_data[0] is X and valid_data[1] is y:
                    valid_set = train_set
                else:
                    valid_weight = _get_meta_data(eval_sample_weight, 'eval_sample_weight', i)
                    valid_class_weight = _get_meta_data(eval_class_weight, 'eval_class_weight', i)
                    if valid_class_weight is not None:
                        if isinstance(valid_class_weight, dict) and self._class_map is not None:
                            valid_class_weight = {self._class_map[k]: v for k, v in valid_class_weight.items()}
                        valid_class_sample_weight = _LGBMComputeSampleWeight(valid_class_weight, valid_data[1])
                        if valid_weight is None or len(valid_weight) == 0:
                            valid_weight = valid_class_sample_weight
                        else:
                            valid_weight = np.multiply(valid_weight, valid_class_sample_weight)
                    valid_init_score = _get_meta_data(eval_init_score, 'eval_init_score', i)
                    valid_group = _get_meta_data(eval_group, 'eval_group', i)
                    valid_set = _construct_dataset(valid_data[0], valid_data[1],
                                                   valid_weight, valid_init_score, valid_group, params)
                valid_sets.append(valid_set)

        if isinstance(init_model, LGBMModel):
            init_model = init_model.booster_

        if callbacks is None:
            callbacks = []
        else:
            callbacks = copy.copy(callbacks)  # don't use deepcopy here to allow non-serializable objects

        evals_result = {}
        callbacks.append(record_evaluation(evals_result))

        self._Booster = train(
            params=params,
            train_set=train_set,
            num_boost_round=self.n_estimators,
            valid_sets=valid_sets,
            valid_names=eval_names,
            feval=eval_metrics_callable,
            init_model=init_model,
            feature_name=feature_name,
            callbacks=callbacks
        )
        self._evals_result = evals_result
        self._best_iteration = self._Booster.best_iteration
        self._best_score = self._Booster.best_score

        self.fitted_ = True

        # free dataset
        self._Booster.free_dataset()
        del train_set, valid_sets
        return self

    fit.__doc__ = _lgbmmodel_doc_fit.format(
        X_shape="array-like or sparse matrix of shape = [n_samples, n_features]",
        y_shape="array-like of shape = [n_samples]",
        sample_weight_shape="array-like of shape = [n_samples] or None, optional (default=None)",
        init_score_shape="array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task) or shape = [n_samples, n_classes] (for multi-class task) or None, optional (default=None)",
        group_shape="array-like or None, optional (default=None)",
        eval_sample_weight_shape="list of array, or None, optional (default=None)",
        eval_init_score_shape="list of array, or None, optional (default=None)",
        eval_group_shape="list of array, or None, optional (default=None)"
    ) + "\n\n" + _lgbmmodel_doc_custom_eval_note

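    # Example (illustrative, not part of the library): fitting with a validation
    # set; ``lgb.early_stopping`` is assumed to be available as a callback.
    #
    #     import lightgbm as lgb
    #     model = lgb.LGBMRegressor(n_estimators=500)
    #     model.fit(X_train, y_train,
    #               eval_set=[(X_valid, y_valid)],
    #               eval_metric='l1',
    #               callbacks=[lgb.early_stopping(stopping_rounds=10)])
    #     model.best_iteration_  # set once early stopping triggers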
    def predict(self, X, raw_score=False, start_iteration=0, num_iteration=None,
                pred_leaf=False, pred_contrib=False, **kwargs):
        """Docstring is set after definition, using a template."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("Estimator not fitted, call fit before exploiting the model.")
        if not isinstance(X, (pd_DataFrame, dt_DataTable)):
            X = _LGBMCheckArray(X, accept_sparse=True, force_all_finite=False)
        n_features = X.shape[1]
        if self._n_features != n_features:
            raise ValueError("Number of features of the model must "
                             f"match the input. Model n_features_ is {self._n_features} and "
                             f"input n_features is {n_features}")
        # retrieve original params that possibly can be used in both training and prediction
        # and then overwrite them (considering aliases) with params that were passed directly in prediction
        predict_params = self._process_params(stage="predict")
        for alias in _ConfigAliases.get_by_alias(
            "data",
            "X",
            "raw_score",
            "start_iteration",
            "num_iteration",
            "pred_leaf",
            "pred_contrib",
            *kwargs.keys()
        ):
            predict_params.pop(alias, None)
        predict_params.update(kwargs)

        # number of threads can have values with special meaning which is only applied
        # in the scikit-learn interface, these should not reach the c++ side as-is
        n_jobs = self.n_jobs
        for alias in _ConfigAliases.get("num_threads"):
            if alias in predict_params:
                n_jobs = predict_params.pop(alias)
        predict_params["num_threads"] = self._process_n_jobs(n_jobs)

        return self._Booster.predict(X, raw_score=raw_score, start_iteration=start_iteration, num_iteration=num_iteration,
                                     pred_leaf=pred_leaf, pred_contrib=pred_contrib, **predict_params)

    predict.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted value for each sample.",
        X_shape="array-like or sparse matrix of shape = [n_samples, n_features]",
        output_name="predicted_result",
        predicted_result_shape="array-like of shape = [n_samples] or shape = [n_samples, n_classes]",
        X_leaves_shape="array-like of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]",
        X_SHAP_values_shape="array-like of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes] or list with n_classes length of such objects"
    )

    @property
    def n_features_(self) -> int:
        """:obj:`int`: The number of features of fitted model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No n_features found. Need to call fit beforehand.')
        return self._n_features

    @property
    def n_features_in_(self) -> int:
        """:obj:`int`: The number of features of fitted model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No n_features_in found. Need to call fit beforehand.')
        return self._n_features_in

    @property
    def best_score_(self):
        """:obj:`dict`: The best score of fitted model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No best_score found. Need to call fit beforehand.')
        return self._best_score

    @property
    def best_iteration_(self) -> int:
        """:obj:`int`: The best iteration of fitted model if ``early_stopping()`` callback has been specified."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No best_iteration found. Need to call fit with early_stopping callback beforehand.')
        return self._best_iteration

    @property
    def objective_(self) -> Union[str, _LGBM_ScikitCustomObjectiveFunction]:
        """:obj:`str` or :obj:`callable`: The concrete objective used while fitting this model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No objective found. Need to call fit beforehand.')
        return self._objective

    @property
    def n_estimators_(self) -> int:
        """:obj:`int`: True number of boosting iterations performed.

        This might be less than parameter ``n_estimators`` if early stopping was enabled or
        if boosting stopped early due to limits on complexity like ``min_gain_to_split``.
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No n_estimators found. Need to call fit beforehand.')
        return self._Booster.current_iteration()  # type: ignore

    @property
    def n_iter_(self) -> int:
        """:obj:`int`: True number of boosting iterations performed.

        This might be less than parameter ``n_estimators`` if early stopping was enabled or
        if boosting stopped early due to limits on complexity like ``min_gain_to_split``.
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No n_iter found. Need to call fit beforehand.')
        return self._Booster.current_iteration()  # type: ignore

    @property
    def booster_(self):
        """Booster: The underlying Booster of this model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No booster found. Need to call fit beforehand.')
        return self._Booster

    @property
    def evals_result_(self):
        """:obj:`dict`: The evaluation results if validation sets have been specified."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No results found. Need to call fit with eval_set beforehand.')
        return self._evals_result

    @property
    def feature_importances_(self):
        """:obj:`array` of shape = [n_features]: The feature importances (the higher, the more important).

        .. note::

            ``importance_type`` attribute is passed to the function
            to configure the type of importance values to be extracted.
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No feature_importances found. Need to call fit beforehand.')
        return self._Booster.feature_importance(importance_type=self.importance_type)

    @property
    def feature_name_(self):
        """:obj:`array` of shape = [n_features]: The names of features."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No feature_name found. Need to call fit beforehand.')
        return self._Booster.feature_name()

class LGBMRegressor(_LGBMRegressorBase, LGBMModel):
    """LightGBM regressor."""

    def fit(
        self,
        X,
        y,
        sample_weight=None,
        init_score=None,
        eval_set=None,
        eval_names=None,
        eval_sample_weight=None,
        eval_init_score=None,
        eval_metric=None,
        feature_name='auto',
        categorical_feature='auto',
        callbacks=None,
        init_model=None
    ):
        """Docstring is inherited from the LGBMModel."""
        super().fit(
            X,
            y,
            sample_weight=sample_weight,
            init_score=init_score,
            eval_set=eval_set,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_init_score=eval_init_score,
            eval_metric=eval_metric,
            feature_name=feature_name,
            categorical_feature=categorical_feature,
            callbacks=callbacks,
            init_model=init_model
        )
        return self

    _base_doc = LGBMModel.fit.__doc__.replace("self : LGBMModel", "self : LGBMRegressor")  # type: ignore
    _base_doc = (_base_doc[:_base_doc.find('group :')]  # type: ignore
                 + _base_doc[_base_doc.find('eval_set :'):])  # type: ignore
    _base_doc = (_base_doc[:_base_doc.find('eval_class_weight :')]
                 + _base_doc[_base_doc.find('eval_init_score :'):])
    fit.__doc__ = (_base_doc[:_base_doc.find('eval_group :')]
                   + _base_doc[_base_doc.find('eval_metric :'):])

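# Example (illustrative, not part of the library): basic LGBMRegressor usage;
# a custom callable such as the ``l2_objective`` sketched above may be passed
# as ``objective``.
#
#     reg = LGBMRegressor(n_estimators=100, learning_rate=0.05)
#     reg.fit(X_train, y_train)
#     y_pred = reg.predict(X_test)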
class LGBMClassifier(_LGBMClassifierBase, LGBMModel):
    """LightGBM classifier."""

    def fit(
        self,
        X,
        y,
        sample_weight=None,
        init_score=None,
        eval_set=None,
        eval_names=None,
        eval_sample_weight=None,
        eval_class_weight=None,
        eval_init_score=None,
        eval_metric=None,
        feature_name='auto',
        categorical_feature='auto',
        callbacks=None,
        init_model=None
    ):
        """Docstring is inherited from the LGBMModel."""
        _LGBMAssertAllFinite(y)
        _LGBMCheckClassificationTargets(y)
        self._le = _LGBMLabelEncoder().fit(y)
        _y = self._le.transform(y)
        self._class_map = dict(zip(self._le.classes_, self._le.transform(self._le.classes_)))
        if isinstance(self.class_weight, dict):
            self._class_weight = {self._class_map[k]: v for k, v in self.class_weight.items()}

        self._classes = self._le.classes_
        self._n_classes = len(self._classes)

        if not callable(eval_metric):
            if isinstance(eval_metric, (str, type(None))):
                eval_metric = [eval_metric]
            if self._n_classes > 2:
                for index, metric in enumerate(eval_metric):
                    if metric in {'logloss', 'binary_logloss'}:
                        eval_metric[index] = "multi_logloss"
                    elif metric in {'error', 'binary_error'}:
                        eval_metric[index] = "multi_error"
            else:
                for index, metric in enumerate(eval_metric):
                    if metric in {'logloss', 'multi_logloss'}:
                        eval_metric[index] = 'binary_logloss'
                    elif metric in {'error', 'multi_error'}:
                        eval_metric[index] = 'binary_error'
        # do not modify args, as it causes errors in model selection tools
        valid_sets = None
        if eval_set is not None:
            if isinstance(eval_set, tuple):
                eval_set = [eval_set]
            valid_sets = [None] * len(eval_set)
            for i, (valid_x, valid_y) in enumerate(eval_set):
                if valid_x is X and valid_y is y:
                    valid_sets[i] = (valid_x, _y)
                else:
                    valid_sets[i] = (valid_x, self._le.transform(valid_y))

        super().fit(
            X,
            _y,
            sample_weight=sample_weight,
            init_score=init_score,
            eval_set=valid_sets,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_class_weight=eval_class_weight,
            eval_init_score=eval_init_score,
            eval_metric=eval_metric,
            feature_name=feature_name,
            categorical_feature=categorical_feature,
            callbacks=callbacks,
            init_model=init_model
        )
        return self

    _base_doc = LGBMModel.fit.__doc__.replace("self : LGBMModel", "self : LGBMClassifier")  # type: ignore
    _base_doc = (_base_doc[:_base_doc.find('group :')]  # type: ignore
                 + _base_doc[_base_doc.find('eval_set :'):])  # type: ignore
    fit.__doc__ = (_base_doc[:_base_doc.find('eval_group :')]
                   + _base_doc[_base_doc.find('eval_metric :'):])

    def predict(self, X, raw_score=False, start_iteration=0, num_iteration=None,
                pred_leaf=False, pred_contrib=False, **kwargs):
        """Docstring is inherited from the LGBMModel."""
        result = self.predict_proba(X, raw_score, start_iteration, num_iteration,
                                    pred_leaf, pred_contrib, **kwargs)
        if callable(self._objective) or raw_score or pred_leaf or pred_contrib:
            return result
        else:
            class_index = np.argmax(result, axis=1)
            return self._le.inverse_transform(class_index)

    predict.__doc__ = LGBMModel.predict.__doc__

    def predict_proba(self, X, raw_score=False, start_iteration=0, num_iteration=None,
                      pred_leaf=False, pred_contrib=False, **kwargs):
        """Docstring is set after definition, using a template."""
        result = super().predict(X, raw_score, start_iteration, num_iteration, pred_leaf, pred_contrib, **kwargs)
        if callable(self._objective) and not (raw_score or pred_leaf or pred_contrib):
            _log_warning("Cannot compute class probabilities or labels "
                         "due to the usage of customized objective function.\n"
                         "Returning raw scores instead.")
            return result
        elif self._n_classes > 2 or raw_score or pred_leaf or pred_contrib:
            return result
        else:
            return np.vstack((1. - result, result)).transpose()

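    # Example (illustrative): for a binary task the single raw-score column is
    # expanded via ``np.vstack((1. - result, result)).transpose()`` above, so
    # ``clf.predict_proba(X)`` has shape (n_samples, 2), with columns ordered
    # as ``clf.classes_``.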
    predict_proba.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted probability for each class for each sample.",
        X_shape="array-like or sparse matrix of shape = [n_samples, n_features]",
        output_name="predicted_probability",
        predicted_result_shape="array-like of shape = [n_samples] or shape = [n_samples, n_classes]",
        X_leaves_shape="array-like of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]",
        X_SHAP_values_shape="array-like of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes] or list with n_classes length of such objects"
    )

    @property
    def classes_(self):
        """:obj:`array` of shape = [n_classes]: The class label array."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No classes found. Need to call fit beforehand.')
        return self._classes

    @property
    def n_classes_(self) -> int:
        """:obj:`int`: The number of classes."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No classes found. Need to call fit beforehand.')
        return self._n_classes

class LGBMRanker(LGBMModel):
    """LightGBM ranker.

    .. warning::

        scikit-learn doesn't support ranking applications yet,
        therefore this class is not really compatible with the sklearn ecosystem.
        Please use this class mainly for training and applying ranking models in common sklearnish way.
    """

    def fit(
        self,
        X,
        y,
        sample_weight=None,
        init_score=None,
        group=None,
        eval_set=None,
        eval_names=None,
        eval_sample_weight=None,
        eval_init_score=None,
        eval_group=None,
        eval_metric=None,
        eval_at=(1, 2, 3, 4, 5),
        feature_name='auto',
        categorical_feature='auto',
        callbacks=None,
        init_model=None
    ):
        """Docstring is inherited from the LGBMModel."""
        # check group data
        if group is None:
            raise ValueError("Should set group for ranking task")

        if eval_set is not None:
            if eval_group is None:
                raise ValueError("Eval_group cannot be None when eval_set is not None")
            elif len(eval_group) != len(eval_set):
                raise ValueError("Length of eval_group should be equal to eval_set")
            elif (isinstance(eval_group, dict)
                  and any(i not in eval_group or eval_group[i] is None for i in range(len(eval_group)))
                  or isinstance(eval_group, list)
                  and any(group is None for group in eval_group)):
                raise ValueError("Should set group for all eval datasets for ranking task; "
                                 "if you use dict, the index should start from 0")

        self._eval_at = eval_at
        super().fit(
            X,
            y,
            sample_weight=sample_weight,
            init_score=init_score,
            group=group,
            eval_set=eval_set,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_init_score=eval_init_score,
            eval_group=eval_group,
            eval_metric=eval_metric,
            feature_name=feature_name,
            categorical_feature=categorical_feature,
            callbacks=callbacks,
            init_model=init_model
        )
        return self

    _base_doc = LGBMModel.fit.__doc__.replace("self : LGBMModel", "self : LGBMRanker")  # type: ignore
    fit.__doc__ = (_base_doc[:_base_doc.find('eval_class_weight :')]  # type: ignore
                   + _base_doc[_base_doc.find('eval_init_score :'):])  # type: ignore
    _base_doc = fit.__doc__
    _before_feature_name, _feature_name, _after_feature_name = _base_doc.partition('feature_name :')
    fit.__doc__ = f"""{_before_feature_name}eval_at : iterable of int, optional (default=(1, 2, 3, 4, 5))
        The evaluation positions of the specified metric.
    {_feature_name}{_after_feature_name}"""
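
# Example (illustrative, not part of the library): training a ranker; ``group``
# gives the query sizes and ``eval_at`` the evaluation cutoffs.
#
#     ranker = LGBMRanker(n_estimators=100)
#     ranker.fit(X_train, y_train, group=[10, 20, 40, 10, 10, 10],
#                eval_set=[(X_valid, y_valid)], eval_group=[valid_group],
#                eval_at=(1, 3, 5))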