# coding: utf-8
"""Scikit-learn wrapper interface for LightGBM."""
import copy
from inspect import signature
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import numpy as np

from .basic import (Booster, Dataset, LightGBMError, _choose_param_value, _ConfigAliases, _LGBM_EvalFunctionResultType,
                    _log_warning)
from .callback import record_evaluation
from .compat import (SKLEARN_INSTALLED, LGBMNotFittedError, _LGBMAssertAllFinite, _LGBMCheckArray,
                     _LGBMCheckClassificationTargets, _LGBMCheckSampleWeight, _LGBMCheckXY, _LGBMClassifierBase,
                     _LGBMComputeSampleWeight, _LGBMCpuCount, _LGBMLabelEncoder, _LGBMModelBase, _LGBMRegressorBase,
                     dt_DataTable, pd_DataFrame)
from .engine import train

_LGBM_ScikitCustomObjectiveFunction = Union[
    Callable[
        [np.ndarray, np.ndarray],
        Tuple[np.ndarray, np.ndarray]
    ],
    Callable[
        [np.ndarray, np.ndarray, np.ndarray],
        Tuple[np.ndarray, np.ndarray]
    ],
    Callable[
        [np.ndarray, np.ndarray, np.ndarray, np.ndarray],
        Tuple[np.ndarray, np.ndarray]
    ],
]
_LGBM_ScikitCustomEvalFunction = Union[
    Callable[
        [np.ndarray, np.ndarray],
        Union[_LGBM_EvalFunctionResultType, List[_LGBM_EvalFunctionResultType]]
    ],
    Callable[
        [np.ndarray, np.ndarray, np.ndarray],
        Union[_LGBM_EvalFunctionResultType, List[_LGBM_EvalFunctionResultType]]
    ],
    Callable[
        [np.ndarray, np.ndarray, np.ndarray, np.ndarray],
        Union[_LGBM_EvalFunctionResultType, List[_LGBM_EvalFunctionResultType]]
    ],
]
_LGBM_ScikitEvalMetricType = Union[
    str,
    _LGBM_ScikitCustomEvalFunction,
    List[Union[str, _LGBM_ScikitCustomEvalFunction]]
]


class _ObjectiveFunctionWrapper:
    """Proxy class for objective function."""

    def __init__(self, func: _LGBM_ScikitCustomObjectiveFunction):
        """Construct a proxy class.

        This class transforms an objective function into one with the signature ``new_func(preds, dataset)``,
        as expected by ``lightgbm.engine.train``.

        Parameters
        ----------
        func : callable
            Expects a callable with the following signatures:
            ``func(y_true, y_pred)``,
            ``func(y_true, y_pred, weight)``
            or ``func(y_true, y_pred, weight, group)``
            and returns (grad, hess):

                y_true : numpy 1-D array of shape = [n_samples]
                    The target values.
                y_pred : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                    The predicted values.
                    Predicted values are returned before any transformation,
                    e.g. they are raw margin instead of probability of positive class for binary task.
                weight : numpy 1-D array of shape = [n_samples]
                    The weight of samples. Weights should be non-negative.
                group : numpy 1-D array
                    Group/query data.
                    Only used in the learning-to-rank task.
                    sum(group) = n_samples.
                    For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
                    where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
                grad : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                    The value of the first order derivative (gradient) of the loss
                    with respect to the elements of y_pred for each sample point.
                hess : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                    The value of the second order derivative (Hessian) of the loss
                    with respect to the elements of y_pred for each sample point.

        .. note::

            For multi-class task, y_pred is a numpy 2-D array of shape = [n_samples, n_classes],
            and grad and hess should be returned in the same format.
        """
        self.func = func

    def __call__(self, preds: np.ndarray, dataset: Dataset) -> Tuple[np.ndarray, np.ndarray]:
        """Call passed function with appropriate arguments.

        Parameters
        ----------
        preds : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The predicted values.
        dataset : Dataset
            The training dataset.

        Returns
        -------
        grad : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The value of the first order derivative (gradient) of the loss
            with respect to the elements of preds for each sample point.
        hess : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The value of the second order derivative (Hessian) of the loss
            with respect to the elements of preds for each sample point.
        """
        labels = dataset.get_label()
        argc = len(signature(self.func).parameters)
        if argc == 2:
            grad, hess = self.func(labels, preds)
        elif argc == 3:
            grad, hess = self.func(labels, preds, dataset.get_weight())
        elif argc == 4:
            grad, hess = self.func(labels, preds, dataset.get_weight(), dataset.get_group())
        else:
            raise TypeError(f"Self-defined objective function should have 2, 3 or 4 arguments, got {argc}")
        return grad, hess
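

# --- Illustrative sketch (not part of the library API) -------------------------
# A minimal custom objective compatible with ``_ObjectiveFunctionWrapper``: for
# squared error, grad = y_pred - y_true and hess = 1 for every sample. The name
# ``_example_l2_objective`` is hypothetical and used only for illustration.
def _example_l2_objective(y_true: np.ndarray, y_pred: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    grad = y_pred - y_true       # d/dy_pred of 0.5 * (y_pred - y_true) ** 2
    hess = np.ones_like(y_true)  # second derivative is the constant 1
    return grad, hess
# ``LGBMRegressor(objective=_example_l2_objective)`` would route calls through
# ``_ObjectiveFunctionWrapper.__call__(preds, dataset)`` during training.
# --------------------------------------------------------------------------------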


class _EvalFunctionWrapper:
    """Proxy class for evaluation function."""

    def __init__(self, func: _LGBM_ScikitCustomEvalFunction):
        """Construct a proxy class.

        This class transforms an evaluation function into one with the signature ``new_func(preds, dataset)``,
        as expected by ``lightgbm.engine.train``.

        Parameters
        ----------
        func : callable
            Expects a callable with the following signatures:
            ``func(y_true, y_pred)``,
            ``func(y_true, y_pred, weight)``
            or ``func(y_true, y_pred, weight, group)``
            and returns (eval_name, eval_result, is_higher_better) or
            list of (eval_name, eval_result, is_higher_better):

                y_true : numpy 1-D array of shape = [n_samples]
                    The target values.
                y_pred : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                    The predicted values.
                    In case of custom ``objective``, predicted values are returned before any transformation,
                    e.g. they are raw margin instead of probability of positive class for binary task in this case.
                weight : numpy 1-D array of shape = [n_samples]
                    The weight of samples. Weights should be non-negative.
                group : numpy 1-D array
                    Group/query data.
                    Only used in the learning-to-rank task.
                    sum(group) = n_samples.
                    For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
                    where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
                eval_name : str
                    The name of evaluation function (without whitespace).
                eval_result : float
                    The eval result.
                is_higher_better : bool
                    Whether a higher value of eval_result is better, e.g. AUC is ``is_higher_better``.
        """
        self.func = func

    def __call__(self, preds: np.ndarray, dataset: Dataset) -> Tuple[str, float, bool]:
        """Call passed function with appropriate arguments.

        Parameters
        ----------
        preds : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The predicted values.
        dataset : Dataset
            The training dataset.

        Returns
        -------
        eval_name : str
            The name of evaluation function (without whitespace).
        eval_result : float
            The eval result.
        is_higher_better : bool
            Whether a higher value of eval_result is better, e.g. AUC is ``is_higher_better``.
        """
        labels = dataset.get_label()
        argc = len(signature(self.func).parameters)
        if argc == 2:
            return self.func(labels, preds)
        elif argc == 3:
            return self.func(labels, preds, dataset.get_weight())
        elif argc == 4:
            return self.func(labels, preds, dataset.get_weight(), dataset.get_group())
        else:
            raise TypeError(f"Self-defined eval function should have 2, 3 or 4 arguments, got {argc}")
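

# --- Illustrative sketch (not part of the library API) -------------------------
# A minimal custom eval function compatible with ``_EvalFunctionWrapper``: RMSE
# returned as the (eval_name, eval_result, is_higher_better) triple that the
# wrapper forwards to ``lightgbm.engine.train``. The name ``_example_rmse`` is
# hypothetical and used only for illustration.
def _example_rmse(y_true: np.ndarray, y_pred: np.ndarray) -> Tuple[str, float, bool]:
    rmse = float(np.sqrt(np.mean((y_pred - y_true) ** 2)))
    return 'example_rmse', rmse, False  # lower RMSE is better
# --------------------------------------------------------------------------------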


# documentation templates for LGBMModel methods are shared between the classes in
# this module and those in the ``dask`` module

_lgbmmodel_doc_fit = (
    """
    Build a gradient boosting model from the training set (X, y).

    Parameters
    ----------
    X : {X_shape}
        Input feature matrix.
    y : {y_shape}
        The target values (class labels in classification, real numbers in regression).
    sample_weight : {sample_weight_shape}
        Weights of training data. Weights should be non-negative.
    init_score : {init_score_shape}
        Init score of training data.
    group : {group_shape}
        Group/query data.
        Only used in the learning-to-rank task.
        sum(group) = n_samples.
        For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
        where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
    eval_set : list or None, optional (default=None)
        A list of (X, y) tuple pairs to use as validation sets.
    eval_names : list of str, or None, optional (default=None)
        Names of eval_set.
    eval_sample_weight : {eval_sample_weight_shape}
        Weights of eval data. Weights should be non-negative.
    eval_class_weight : list or None, optional (default=None)
        Class weights of eval data.
    eval_init_score : {eval_init_score_shape}
        Init score of eval data.
    eval_group : {eval_group_shape}
        Group data of eval data.
    eval_metric : str, callable, list or None, optional (default=None)
        If str, it should be a built-in evaluation metric to use.
        If callable, it should be a custom evaluation metric, see note below for more details.
        If list, it can be a list of built-in metrics, a list of custom evaluation metrics, or a mix of both.
        In either case, the ``metric`` from the model parameters will be evaluated and used as well.
        Default: 'l2' for LGBMRegressor, 'logloss' for LGBMClassifier, 'ndcg' for LGBMRanker.
    feature_name : list of str, or 'auto', optional (default='auto')
        Feature names.
        If 'auto' and data is pandas DataFrame, data columns names are used.
    categorical_feature : list of str or int, or 'auto', optional (default='auto')
        Categorical features.
        If list of int, interpreted as indices.
        If list of str, interpreted as feature names (need to specify ``feature_name`` as well).
        If 'auto' and data is pandas DataFrame, pandas unordered categorical columns are used.
        All values in categorical features will be cast to int32 and thus should be less than int32 max value (2147483647).
        Large values could be memory consuming. Consider using consecutive integers starting from zero.
        All negative values in categorical features will be treated as missing values.
        The output cannot be monotonically constrained with respect to a categorical feature.
        Floating point numbers in categorical features will be rounded towards 0.
    callbacks : list of callable, or None, optional (default=None)
        List of callback functions that are applied at each iteration.
        See Callbacks in Python API for more information.
    init_model : str, pathlib.Path, Booster, LGBMModel or None, optional (default=None)
        Filename of LightGBM model, Booster instance or LGBMModel instance used to continue training.

    Returns
    -------
    self : LGBMModel
        Returns self.
    """
)

_lgbmmodel_doc_custom_eval_note = """
    Note
    ----
    Custom eval function expects a callable with the following signatures:
    ``func(y_true, y_pred)``, ``func(y_true, y_pred, weight)`` or
    ``func(y_true, y_pred, weight, group)``
    and returns (eval_name, eval_result, is_higher_better) or
    list of (eval_name, eval_result, is_higher_better):

        y_true : numpy 1-D array of shape = [n_samples]
            The target values.
        y_pred : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The predicted values.
            In case of custom ``objective``, predicted values are returned before any transformation,
            e.g. they are raw margin instead of probability of positive class for binary task in this case.
        weight : numpy 1-D array of shape = [n_samples]
            The weight of samples. Weights should be non-negative.
        group : numpy 1-D array
            Group/query data.
            Only used in the learning-to-rank task.
            sum(group) = n_samples.
            For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
            where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
        eval_name : str
            The name of evaluation function (without whitespace).
        eval_result : float
            The eval result.
        is_higher_better : bool
            Whether a higher value of eval_result is better, e.g. AUC is ``is_higher_better``.
"""

_lgbmmodel_doc_predict = (
    """
    {description}

    Parameters
    ----------
    X : {X_shape}
        Input features matrix.
    raw_score : bool, optional (default=False)
        Whether to predict raw scores.
    start_iteration : int, optional (default=0)
        Start index of the iteration to predict.
        If <= 0, starts from the first iteration.
    num_iteration : int or None, optional (default=None)
        Total number of iterations used in the prediction.
        If None, if the best iteration exists and start_iteration <= 0, the best iteration is used;
        otherwise, all iterations from ``start_iteration`` are used (no limits).
        If <= 0, all iterations from ``start_iteration`` are used (no limits).
    pred_leaf : bool, optional (default=False)
        Whether to predict leaf index.
    pred_contrib : bool, optional (default=False)
        Whether to predict feature contributions.

        .. note::

            If you want to get more explanations for your model's predictions using SHAP values,
            like SHAP interaction values,
            you can install the shap package (https://github.com/slundberg/shap).
            Note that unlike the shap package, with ``pred_contrib`` we return a matrix with an extra
            column, where the last column is the expected value.

    validate_features : bool, optional (default=False)
        If True, ensure that the features used to predict match the ones used to train.
        Used only if data is pandas DataFrame.
    **kwargs
        Other parameters for the prediction.

    Returns
    -------
    {output_name} : {predicted_result_shape}
        The predicted values.
    X_leaves : {X_leaves_shape}
        If ``pred_leaf=True``, the predicted leaf of every tree for each sample.
    X_SHAP_values : {X_SHAP_values_shape}
        If ``pred_contrib=True``, the feature contributions for each sample.
    """
)


class LGBMModel(_LGBMModelBase):
    """Implementation of the scikit-learn API for LightGBM."""

    def __init__(
        self,
        boosting_type: str = 'gbdt',
        num_leaves: int = 31,
        max_depth: int = -1,
        learning_rate: float = 0.1,
        n_estimators: int = 100,
        subsample_for_bin: int = 200000,
        objective: Optional[Union[str, _LGBM_ScikitCustomObjectiveFunction]] = None,
        class_weight: Optional[Union[Dict, str]] = None,
        min_split_gain: float = 0.,
        min_child_weight: float = 1e-3,
        min_child_samples: int = 20,
        subsample: float = 1.,
        subsample_freq: int = 0,
        colsample_bytree: float = 1.,
        reg_alpha: float = 0.,
        reg_lambda: float = 0.,
        random_state: Optional[Union[int, np.random.RandomState]] = None,
        n_jobs: Optional[int] = None,
        importance_type: str = 'split',
        **kwargs
    ):
        r"""Construct a gradient boosting model.

        Parameters
        ----------
        boosting_type : str, optional (default='gbdt')
            'gbdt', traditional Gradient Boosting Decision Tree.
            'dart', Dropouts meet Multiple Additive Regression Trees.
            'goss', Gradient-based One-Side Sampling.
            'rf', Random Forest.
        num_leaves : int, optional (default=31)
            Maximum tree leaves for base learners.
        max_depth : int, optional (default=-1)
            Maximum tree depth for base learners, <=0 means no limit.
        learning_rate : float, optional (default=0.1)
            Boosting learning rate.
            You can use ``callbacks`` parameter of ``fit`` method to shrink/adapt learning rate
            in training using ``reset_parameter`` callback.
            Note, that this will ignore the ``learning_rate`` argument in training.
        n_estimators : int, optional (default=100)
            Number of boosted trees to fit.
        subsample_for_bin : int, optional (default=200000)
            Number of samples for constructing bins.
        objective : str, callable or None, optional (default=None)
            Specify the learning task and the corresponding learning objective or
            a custom objective function to be used (see note below).
            Default: 'regression' for LGBMRegressor, 'binary' or 'multiclass' for LGBMClassifier, 'lambdarank' for LGBMRanker.
        class_weight : dict, 'balanced' or None, optional (default=None)
            Weights associated with classes in the form ``{class_label: weight}``.
            Use this parameter only for multi-class classification task;
            for binary classification task you may use ``is_unbalance`` or ``scale_pos_weight`` parameters.
            Note, that the usage of all these parameters will result in poor estimates of the individual class probabilities.
            You may want to consider performing probability calibration
            (https://scikit-learn.org/stable/modules/calibration.html) of your model.
            The 'balanced' mode uses the values of y to automatically adjust weights
            inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))``.
            If None, all classes are supposed to have weight one.
            Note, that these weights will be multiplied with ``sample_weight`` (passed through the ``fit`` method)
            if ``sample_weight`` is specified.
        min_split_gain : float, optional (default=0.)
            Minimum loss reduction required to make a further partition on a leaf node of the tree.
        min_child_weight : float, optional (default=1e-3)
            Minimum sum of instance weight (Hessian) needed in a child (leaf).
        min_child_samples : int, optional (default=20)
            Minimum number of data needed in a child (leaf).
        subsample : float, optional (default=1.)
            Subsample ratio of the training instance.
        subsample_freq : int, optional (default=0)
            Frequency of subsampling, <=0 means disabled.
        colsample_bytree : float, optional (default=1.)
            Subsample ratio of columns when constructing each tree.
        reg_alpha : float, optional (default=0.)
            L1 regularization term on weights.
        reg_lambda : float, optional (default=0.)
            L2 regularization term on weights.
        random_state : int, RandomState object or None, optional (default=None)
            Random number seed.
            If int, this number is used to seed the C++ code.
            If RandomState object (numpy), a random integer is picked based on its state to seed the C++ code.
            If None, default seeds in C++ code are used.
        n_jobs : int or None, optional (default=None)
            Number of parallel threads to use for training (can be changed at prediction time by
            passing it as an extra keyword argument).

            For better performance, it is recommended to set this to the number of physical cores
            in the CPU.

            Negative integers are interpreted as following joblib's formula (n_cpus + 1 + n_jobs), just like
            scikit-learn (so e.g. -1 means using all threads). A value of zero corresponds to the default number of
            threads configured for OpenMP in the system. A value of ``None`` (the default) corresponds
            to using the number of physical cores in the system (its correct detection requires
            either the ``joblib`` or the ``psutil`` util libraries to be installed).
        importance_type : str, optional (default='split')
            The type of feature importance to be filled into ``feature_importances_``.
            If 'split', result contains numbers of times the feature is used in a model.
            If 'gain', result contains total gains of splits which use the feature.
        **kwargs
            Other parameters for the model.
            Check http://lightgbm.readthedocs.io/en/latest/Parameters.html for more parameters.

            .. warning::

                \*\*kwargs is not supported in sklearn, it may cause unexpected issues.

        Note
        ----
        A custom objective function can be provided for the ``objective`` parameter.
        In this case, it should have the signature
        ``objective(y_true, y_pred) -> grad, hess``,
        ``objective(y_true, y_pred, weight) -> grad, hess``
        or ``objective(y_true, y_pred, weight, group) -> grad, hess``:

            y_true : numpy 1-D array of shape = [n_samples]
                The target values.
            y_pred : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                The predicted values.
                Predicted values are returned before any transformation,
                e.g. they are raw margin instead of probability of positive class for binary task.
            weight : numpy 1-D array of shape = [n_samples]
                The weight of samples. Weights should be non-negative.
            group : numpy 1-D array
                Group/query data.
                Only used in the learning-to-rank task.
                sum(group) = n_samples.
                For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
                where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
            grad : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                The value of the first order derivative (gradient) of the loss
                with respect to the elements of y_pred for each sample point.
            hess : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                The value of the second order derivative (Hessian) of the loss
                with respect to the elements of y_pred for each sample point.

        For multi-class task, y_pred is a numpy 2-D array of shape = [n_samples, n_classes],
        and grad and hess should be returned in the same format.
        """
        if not SKLEARN_INSTALLED:
            raise LightGBMError('scikit-learn is required for lightgbm.sklearn. '
                                'You must install scikit-learn and restart your session to use this module.')

        self.boosting_type = boosting_type
        self.objective = objective
        self.num_leaves = num_leaves
        self.max_depth = max_depth
        self.learning_rate = learning_rate
        self.n_estimators = n_estimators
        self.subsample_for_bin = subsample_for_bin
        self.min_split_gain = min_split_gain
        self.min_child_weight = min_child_weight
        self.min_child_samples = min_child_samples
        self.subsample = subsample
        self.subsample_freq = subsample_freq
        self.colsample_bytree = colsample_bytree
        self.reg_alpha = reg_alpha
        self.reg_lambda = reg_lambda
        self.random_state = random_state
        self.n_jobs = n_jobs
        self.importance_type = importance_type
        self._Booster: Optional[Booster] = None
        self._evals_result = None
        self._best_score = None
        self._best_iteration = None
        self._other_params: Dict[str, Any] = {}
        self._objective = objective
        self.class_weight = class_weight
        self._class_weight = None
        self._class_map = None
        self._n_features = None
        self._n_features_in = None
        self._classes = None
        self._n_classes = None
        self.set_params(**kwargs)

    def _more_tags(self) -> Dict[str, Any]:
        return {
            'allow_nan': True,
            'X_types': ['2darray', 'sparse', '1dlabels'],
            '_xfail_checks': {
                'check_no_attributes_set_in_init':
                'scikit-learn incorrectly asserts that private attributes '
                'cannot be set in __init__: '
                '(see https://github.com/microsoft/LightGBM/issues/2628)'
            }
        }

    def __sklearn_is_fitted__(self) -> bool:
        return getattr(self, "fitted_", False)

    def get_params(self, deep: bool = True) -> Dict[str, Any]:
        """Get parameters for this estimator.

        Parameters
        ----------
        deep : bool, optional (default=True)
            If True, will return the parameters for this estimator and
            contained subobjects that are estimators.

        Returns
        -------
        params : dict
            Parameter names mapped to their values.
        """
        params = super().get_params(deep=deep)
        params.update(self._other_params)
        return params

    def set_params(self, **params: Any) -> "LGBMModel":
        """Set the parameters of this estimator.

        Parameters
        ----------
        **params
            Parameter names with their new values.

        Returns
        -------
        self : object
            Returns self.
        """
        for key, value in params.items():
            setattr(self, key, value)
            if hasattr(self, f"_{key}"):
                setattr(self, f"_{key}", value)
            self._other_params[key] = value
        return self
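
    # Illustrative note (hypothetical call): keys that are not constructor arguments,
    # e.g. ``model.set_params(min_data_in_leaf=5)`` with the ``min_data_in_leaf``
    # alias, are remembered in ``self._other_params`` and reported back by
    # ``get_params()`` above.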

    def _process_params(self, stage: str) -> Dict[str, Any]:
        """Process the parameters of this estimator based on its type, parameter aliases, etc.

        Parameters
        ----------
        stage : str
            Name of the stage (can be ``fit`` or ``predict``) this method is called from.

        Returns
        -------
        processed_params : dict
            Processed parameter names mapped to their values.
        """
        assert stage in {"fit", "predict"}
        params = self.get_params()

        params.pop('objective', None)
        for alias in _ConfigAliases.get('objective'):
            if alias in params:
                obj = params.pop(alias)
                _log_warning(f"Found '{alias}' in params. Will use it instead of 'objective' argument")
                if stage == "fit":
                    self._objective = obj
        if stage == "fit":
            if self._objective is None:
                if isinstance(self, LGBMRegressor):
                    self._objective = "regression"
                elif isinstance(self, LGBMClassifier):
                    if self._n_classes > 2:
                        self._objective = "multiclass"
                    else:
                        self._objective = "binary"
                elif isinstance(self, LGBMRanker):
                    self._objective = "lambdarank"
                else:
                    raise ValueError("Unknown LGBMModel type.")
        if callable(self._objective):
            if stage == "fit":
                params['objective'] = _ObjectiveFunctionWrapper(self._objective)
            else:
                params['objective'] = 'None'
        else:
            params['objective'] = self._objective

        params.pop('importance_type', None)
        params.pop('n_estimators', None)
        params.pop('class_weight', None)

        if isinstance(params['random_state'], np.random.RandomState):
            params['random_state'] = params['random_state'].randint(np.iinfo(np.int32).max)
        if self._n_classes is not None and self._n_classes > 2:
            for alias in _ConfigAliases.get('num_class'):
                params.pop(alias, None)
            params['num_class'] = self._n_classes
        if hasattr(self, '_eval_at'):
            eval_at = self._eval_at
            for alias in _ConfigAliases.get('eval_at'):
                if alias in params:
                    _log_warning(f"Found '{alias}' in params. Will use it instead of 'eval_at' argument")
                    eval_at = params.pop(alias)
            params['eval_at'] = eval_at

        # register default metric for consistency with callable eval_metric case
        original_metric = self._objective if isinstance(self._objective, str) else None
        if original_metric is None:
            # try to deduce from class instance
            if isinstance(self, LGBMRegressor):
                original_metric = "l2"
            elif isinstance(self, LGBMClassifier):
                original_metric = "multi_logloss" if self._n_classes > 2 else "binary_logloss"
            elif isinstance(self, LGBMRanker):
                original_metric = "ndcg"

        # overwrite default metric by explicitly set metric
        params = _choose_param_value("metric", params, original_metric)

        # use joblib conventions for negative n_jobs, just like scikit-learn
        # at predict time, this is handled later due to the order of parameter updates
        if stage == "fit":
            params = _choose_param_value("num_threads", params, self.n_jobs)
            params["num_threads"] = self._process_n_jobs(params["num_threads"])

        return params
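
    # Illustrative note: at the "predict" stage a callable objective is mapped to
    # the string 'None' above, so the C++ side applies no objective transformation
    # and predictions for custom objectives are raw scores.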

    def _process_n_jobs(self, n_jobs: Optional[int]) -> int:
        """Convert special values of n_jobs to their actual values according to the formulas that apply.

        Parameters
        ----------
        n_jobs : int or None
            The original value of n_jobs, potentially having special values such as 'None' or
            negative integers.

        Returns
        -------
        n_jobs : int
            The value of n_jobs with special values converted to actual number of threads.
        """
        if n_jobs is None:
            n_jobs = _LGBMCpuCount(only_physical_cores=True)
        elif n_jobs < 0:
            n_jobs = max(_LGBMCpuCount(only_physical_cores=False) + 1 + n_jobs, 1)
        return n_jobs
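
    # Illustrative arithmetic for the mapping above (hypothetical machine with
    # 8 logical CPUs): n_jobs=-1 -> max(8 + 1 - 1, 1) = 8 threads,
    # n_jobs=-2 -> 7 threads, and n_jobs=None -> the number of physical cores.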

    def fit(
        self,
        X,
        y,
        sample_weight=None,
        init_score=None,
        group=None,
        eval_set=None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight=None,
        eval_class_weight=None,
        eval_init_score=None,
        eval_group=None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        feature_name='auto',
        categorical_feature='auto',
        callbacks=None,
        init_model: Optional[Union[str, Path, Booster, "LGBMModel"]] = None
    ):
        """Docstring is set after definition, using a template."""
        params = self._process_params(stage="fit")

        # Do not modify original args in fit function
        # Refer to https://github.com/microsoft/LightGBM/pull/2619
        eval_metric_list = copy.deepcopy(eval_metric)
        if not isinstance(eval_metric_list, list):
            eval_metric_list = [eval_metric_list]

        # Separate built-in from callable evaluation metrics
        eval_metrics_callable = [_EvalFunctionWrapper(f) for f in eval_metric_list if callable(f)]
        eval_metrics_builtin = [m for m in eval_metric_list if isinstance(m, str)]

        # concatenate metric from params (or default if not provided in params) and eval_metric
        params['metric'] = [params['metric']] if isinstance(params['metric'], (str, type(None))) else params['metric']
        params['metric'] = [e for e in eval_metrics_builtin if e not in params['metric']] + params['metric']
        params['metric'] = [metric for metric in params['metric'] if metric is not None]

        if not isinstance(X, (pd_DataFrame, dt_DataTable)):
            _X, _y = _LGBMCheckXY(X, y, accept_sparse=True, force_all_finite=False, ensure_min_samples=2)
            if sample_weight is not None:
                sample_weight = _LGBMCheckSampleWeight(sample_weight, _X)
        else:
            _X, _y = X, y

        if self._class_weight is None:
            self._class_weight = self.class_weight
        if self._class_weight is not None:
            class_sample_weight = _LGBMComputeSampleWeight(self._class_weight, y)
            if sample_weight is None or len(sample_weight) == 0:
                sample_weight = class_sample_weight
            else:
                sample_weight = np.multiply(sample_weight, class_sample_weight)

        self._n_features = _X.shape[1]
        # copy for consistency
        self._n_features_in = self._n_features

        train_set = Dataset(data=_X, label=_y, weight=sample_weight, group=group,
                            init_score=init_score, categorical_feature=categorical_feature,
                            params=params)

        valid_sets = []
        if eval_set is not None:

            def _get_meta_data(collection, name, i):
                if collection is None:
                    return None
                elif isinstance(collection, list):
                    return collection[i] if len(collection) > i else None
                elif isinstance(collection, dict):
                    return collection.get(i, None)
                else:
                    raise TypeError(f"{name} should be dict or list")

            if isinstance(eval_set, tuple):
                eval_set = [eval_set]
            for i, valid_data in enumerate(eval_set):
                # reduce cost for prediction with training data
                if valid_data[0] is X and valid_data[1] is y:
                    valid_set = train_set
                else:
                    valid_weight = _get_meta_data(eval_sample_weight, 'eval_sample_weight', i)
                    valid_class_weight = _get_meta_data(eval_class_weight, 'eval_class_weight', i)
                    if valid_class_weight is not None:
                        if isinstance(valid_class_weight, dict) and self._class_map is not None:
                            valid_class_weight = {self._class_map[k]: v for k, v in valid_class_weight.items()}
                        valid_class_sample_weight = _LGBMComputeSampleWeight(valid_class_weight, valid_data[1])
                        if valid_weight is None or len(valid_weight) == 0:
                            valid_weight = valid_class_sample_weight
                        else:
                            valid_weight = np.multiply(valid_weight, valid_class_sample_weight)
                    valid_init_score = _get_meta_data(eval_init_score, 'eval_init_score', i)
                    valid_group = _get_meta_data(eval_group, 'eval_group', i)
                    valid_set = Dataset(data=valid_data[0], label=valid_data[1], weight=valid_weight,
                                        group=valid_group, init_score=valid_init_score,
                                        categorical_feature='auto', params=params)

                valid_sets.append(valid_set)

        if isinstance(init_model, LGBMModel):
            init_model = init_model.booster_

        if callbacks is None:
            callbacks = []
        else:
            callbacks = copy.copy(callbacks)  # don't use deepcopy here to allow non-serializable objects

        evals_result = {}
        callbacks.append(record_evaluation(evals_result))

        self._Booster = train(
            params=params,
            train_set=train_set,
            num_boost_round=self.n_estimators,
            valid_sets=valid_sets,
            valid_names=eval_names,
            feval=eval_metrics_callable,
            init_model=init_model,
            feature_name=feature_name,
            callbacks=callbacks
        )

        self._evals_result = evals_result
        self._best_iteration = self._Booster.best_iteration
        self._best_score = self._Booster.best_score

        self.fitted_ = True

        # free dataset
        self._Booster.free_dataset()
        del train_set, valid_sets
        return self

    fit.__doc__ = _lgbmmodel_doc_fit.format(
        X_shape="array-like or sparse matrix of shape = [n_samples, n_features]",
        y_shape="array-like of shape = [n_samples]",
        sample_weight_shape="array-like of shape = [n_samples] or None, optional (default=None)",
        init_score_shape="array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task) or shape = [n_samples, n_classes] (for multi-class task) or None, optional (default=None)",
        group_shape="array-like or None, optional (default=None)",
        eval_sample_weight_shape="list of array, or None, optional (default=None)",
        eval_init_score_shape="list of array, or None, optional (default=None)",
        eval_group_shape="list of array, or None, optional (default=None)"
    ) + "\n\n" + _lgbmmodel_doc_custom_eval_note
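
    # Illustrative usage sketch (commented out; ``X``, ``y``, ``X_val``, ``y_val``
    # and the metric name are hypothetical). A callable ``eval_metric`` with the
    # signature documented above flows through ``fit`` into ``evals_result_``:
    #
    #     def mae(y_true, y_pred):
    #         return 'mae', float(np.mean(np.abs(y_true - y_pred))), False
    #
    #     model = LGBMRegressor(n_estimators=10).fit(X, y, eval_set=[(X_val, y_val)], eval_metric=mae)
    #     model.evals_result_  # {'valid_0': {'l2': [...], 'mae': [...]}}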

    def predict(
        self,
        X,
        raw_score: bool = False,
        start_iteration: int = 0,
        num_iteration: Optional[int] = None,
        pred_leaf: bool = False,
        pred_contrib: bool = False,
        validate_features: bool = False,
        **kwargs: Any
    ):
        """Docstring is set after definition, using a template."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("Estimator not fitted, call fit before exploiting the model.")
        if not isinstance(X, (pd_DataFrame, dt_DataTable)):
            X = _LGBMCheckArray(X, accept_sparse=True, force_all_finite=False)
        n_features = X.shape[1]
        if self._n_features != n_features:
            raise ValueError("Number of features of the model must "
                             f"match the input. Model n_features_ is {self._n_features} and "
                             f"input n_features is {n_features}")
        # retrieve original params that possibly can be used in both training and prediction
        # and then overwrite them (considering aliases) with params that were passed directly in prediction
        predict_params = self._process_params(stage="predict")
        for alias in _ConfigAliases.get_by_alias(
            "data",
            "X",
            "raw_score",
            "start_iteration",
            "num_iteration",
            "pred_leaf",
            "pred_contrib",
            *kwargs.keys()
        ):
            predict_params.pop(alias, None)
        predict_params.update(kwargs)

        # number of threads can have values with special meaning which is only applied
        # in the scikit-learn interface, these should not reach the c++ side as-is
        predict_params = _choose_param_value("num_threads", predict_params, self.n_jobs)
        predict_params["num_threads"] = self._process_n_jobs(predict_params["num_threads"])

        return self._Booster.predict(X, raw_score=raw_score, start_iteration=start_iteration, num_iteration=num_iteration,
                                     pred_leaf=pred_leaf, pred_contrib=pred_contrib, validate_features=validate_features,
                                     **predict_params)

    predict.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted value for each sample.",
        X_shape="array-like or sparse matrix of shape = [n_samples, n_features]",
        output_name="predicted_result",
        predicted_result_shape="array-like of shape = [n_samples] or shape = [n_samples, n_classes]",
        X_leaves_shape="array-like of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]",
        X_SHAP_values_shape="array-like of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes] or list with n_classes length of such objects"
    )
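
    # Illustrative note on shapes (hypothetical numbers): for a binary model trained
    # on 5 features, ``predict(X, pred_contrib=True)`` returns an array of shape
    # [n_samples, 6]; the extra last column is the expected value, as noted in the
    # docstring above.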

    @property
    def n_features_(self) -> int:
        """:obj:`int`: The number of features of fitted model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No n_features found. Need to call fit beforehand.')
        return self._n_features

    @property
    def n_features_in_(self) -> int:
        """:obj:`int`: The number of features of fitted model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No n_features_in found. Need to call fit beforehand.')
        return self._n_features_in

    @property
    def best_score_(self):
        """:obj:`dict`: The best score of fitted model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No best_score found. Need to call fit beforehand.')
        return self._best_score

    @property
    def best_iteration_(self) -> int:
        """:obj:`int`: The best iteration of fitted model if ``early_stopping()`` callback has been specified."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No best_iteration found. Need to call fit with early_stopping callback beforehand.')
        return self._best_iteration

    @property
    def objective_(self) -> Union[str, _LGBM_ScikitCustomObjectiveFunction]:
        """:obj:`str` or :obj:`callable`: The concrete objective used while fitting this model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No objective found. Need to call fit beforehand.')
        return self._objective

    @property
    def n_estimators_(self) -> int:
        """:obj:`int`: True number of boosting iterations performed.

        This might be less than parameter ``n_estimators`` if early stopping was enabled or
        if boosting stopped early due to limits on complexity like ``min_gain_to_split``.
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No n_estimators found. Need to call fit beforehand.')
        return self._Booster.current_iteration()  # type: ignore

    @property
    def n_iter_(self) -> int:
        """:obj:`int`: True number of boosting iterations performed.

        This might be less than parameter ``n_estimators`` if early stopping was enabled or
        if boosting stopped early due to limits on complexity like ``min_gain_to_split``.
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No n_iter found. Need to call fit beforehand.')
        return self._Booster.current_iteration()  # type: ignore

    @property
    def booster_(self):
        """Booster: The underlying Booster of this model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No booster found. Need to call fit beforehand.')
        return self._Booster

    @property
    def evals_result_(self):
        """:obj:`dict`: The evaluation results if validation sets have been specified."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No results found. Need to call fit with eval_set beforehand.')
        return self._evals_result

    @property
    def feature_importances_(self):
        """:obj:`array` of shape = [n_features]: The feature importances (the higher, the more important).

        .. note::

            ``importance_type`` attribute is passed to the function
            to configure the type of importance values to be extracted.
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No feature_importances found. Need to call fit beforehand.')
        return self._Booster.feature_importance(importance_type=self.importance_type)

    @property
    def feature_name_(self):
        """:obj:`array` of shape = [n_features]: The names of features."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No feature_name found. Need to call fit beforehand.')
        return self._Booster.feature_name()

class LGBMRegressor(_LGBMRegressorBase, LGBMModel):
    """LightGBM regressor."""

    def fit(
        self,
        X,
        y,
        sample_weight=None,
        init_score=None,
        eval_set=None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight=None,
        eval_init_score=None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        feature_name='auto',
        categorical_feature='auto',
        callbacks=None,
        init_model: Optional[Union[str, Path, Booster, LGBMModel]] = None
    ):
        """Docstring is inherited from the LGBMModel."""
        super().fit(
            X,
            y,
            sample_weight=sample_weight,
            init_score=init_score,
            eval_set=eval_set,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_init_score=eval_init_score,
            eval_metric=eval_metric,
            feature_name=feature_name,
            categorical_feature=categorical_feature,
            callbacks=callbacks,
            init_model=init_model
        )
        return self

    # drop ranking-only (``group``, ``eval_group``) and classification-only
    # (``eval_class_weight``) parameters from the inherited fit docstring
    _base_doc = LGBMModel.fit.__doc__.replace("self : LGBMModel", "self : LGBMRegressor")  # type: ignore
    _base_doc = (_base_doc[:_base_doc.find('group :')]  # type: ignore
                 + _base_doc[_base_doc.find('eval_set :'):])  # type: ignore
    _base_doc = (_base_doc[:_base_doc.find('eval_class_weight :')]
                 + _base_doc[_base_doc.find('eval_init_score :'):])
    fit.__doc__ = (_base_doc[:_base_doc.find('eval_group :')]
                   + _base_doc[_base_doc.find('eval_metric :'):])
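
    # Illustrative usage sketch (commented out; ``X_train``, ``y_train``, ``X_val``
    # and ``y_val`` are hypothetical):
    #
    #     reg = LGBMRegressor(n_estimators=100, learning_rate=0.05)
    #     reg.fit(X_train, y_train, eval_set=[(X_val, y_val)], eval_metric='l1')
    #     y_pred = reg.predict(X_val)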


class LGBMClassifier(_LGBMClassifierBase, LGBMModel):
    """LightGBM classifier."""

    def fit(
        self,
        X,
        y,
        sample_weight=None,
        init_score=None,
        eval_set=None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight=None,
        eval_class_weight=None,
        eval_init_score=None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        feature_name='auto',
        categorical_feature='auto',
        callbacks=None,
        init_model: Optional[Union[str, Path, Booster, LGBMModel]] = None
    ):
        """Docstring is inherited from the LGBMModel."""
        _LGBMAssertAllFinite(y)
        _LGBMCheckClassificationTargets(y)
        self._le = _LGBMLabelEncoder().fit(y)
        _y = self._le.transform(y)
        self._class_map = dict(zip(self._le.classes_, self._le.transform(self._le.classes_)))
        if isinstance(self.class_weight, dict):
            self._class_weight = {self._class_map[k]: v for k, v in self.class_weight.items()}

        self._classes = self._le.classes_
        self._n_classes = len(self._classes)

        if not callable(eval_metric):
            if isinstance(eval_metric, (str, type(None))):
                eval_metric = [eval_metric]
            if self._n_classes > 2:
                for index, metric in enumerate(eval_metric):
                    if metric in {'logloss', 'binary_logloss'}:
                        eval_metric[index] = "multi_logloss"
                    elif metric in {'error', 'binary_error'}:
                        eval_metric[index] = "multi_error"
            else:
                for index, metric in enumerate(eval_metric):
                    if metric in {'logloss', 'multi_logloss'}:
                        eval_metric[index] = 'binary_logloss'
                    elif metric in {'error', 'multi_error'}:
                        eval_metric[index] = 'binary_error'
wxchan's avatar
wxchan committed
1071

1072
1073
        # do not modify args, as it causes errors in model selection tools
        valid_sets = None
wxchan's avatar
wxchan committed
1074
        if eval_set is not None:
1075
1076
            if isinstance(eval_set, tuple):
                eval_set = [eval_set]
1077
            valid_sets = [None] * len(eval_set)
1078
1079
            for i, (valid_x, valid_y) in enumerate(eval_set):
                if valid_x is X and valid_y is y:
1080
                    valid_sets[i] = (valid_x, _y)
1081
                else:
1082
                    valid_sets[i] = (valid_x, self._le.transform(valid_y))
1083

1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
        super().fit(
            X,
            _y,
            sample_weight=sample_weight,
            init_score=init_score,
            eval_set=valid_sets,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_class_weight=eval_class_weight,
            eval_init_score=eval_init_score,
            eval_metric=eval_metric,
            feature_name=feature_name,
            categorical_feature=categorical_feature,
            callbacks=callbacks,
            init_model=init_model
        )
        return self

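    # classification does not use 'group' / 'eval_group'; strip those entries
    # from the docstring inherited from LGBMModel.fit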
    _base_doc = LGBMModel.fit.__doc__.replace("self : LGBMModel", "self : LGBMClassifier")  # type: ignore
    _base_doc = (_base_doc[:_base_doc.find('group :')]  # type: ignore
                 + _base_doc[_base_doc.find('eval_set :'):])  # type: ignore
    fit.__doc__ = (_base_doc[:_base_doc.find('eval_group :')]
                   + _base_doc[_base_doc.find('eval_metric :'):])

    def predict(
        self,
        X,
        raw_score: bool = False,
        start_iteration: int = 0,
        num_iteration: Optional[int] = None,
        pred_leaf: bool = False,
        pred_contrib: bool = False,
        validate_features: bool = False,
        **kwargs: Any
    ):
        """Docstring is inherited from the LGBMModel."""
        result = self.predict_proba(X, raw_score, start_iteration, num_iteration,
                                    pred_leaf, pred_contrib, validate_features,
                                    **kwargs)
        if callable(self._objective) or raw_score or pred_leaf or pred_contrib:
            return result
        else:
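            # take the most probable class and map its index back to the original label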
            class_index = np.argmax(result, axis=1)
            return self._le.inverse_transform(class_index)
    predict.__doc__ = LGBMModel.predict.__doc__

    def predict_proba(
        self,
        X,
        raw_score: bool = False,
        start_iteration: int = 0,
        num_iteration: Optional[int] = None,
        pred_leaf: bool = False,
        pred_contrib: bool = False,
        validate_features: bool = False,
        **kwargs: Any
    ):
        """Docstring is set after definition, using a template."""
        result = super().predict(X, raw_score, start_iteration, num_iteration, pred_leaf, pred_contrib, validate_features, **kwargs)
        if callable(self._objective) and not (raw_score or pred_leaf or pred_contrib):
            _log_warning("Cannot compute class probabilities or labels "
                         "due to the use of a customized objective function.\n"
                         "Returning raw scores instead.")
            return result
        elif self._n_classes > 2 or raw_score or pred_leaf or pred_contrib:
            return result
        else:
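            # binary task: the booster returns only P(class 1); stack it with its
            # complement to produce the [P(class 0), P(class 1)] matrix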
1152
            return np.vstack((1. - result, result)).transpose()
    predict_proba.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted probability for each class for each sample.",
        X_shape="array-like or sparse matrix of shape = [n_samples, n_features]",
        output_name="predicted_probability",
        predicted_result_shape="array-like of shape = [n_samples] or shape = [n_samples, n_classes]",
        X_leaves_shape="array-like of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]",
        X_SHAP_values_shape="array-like of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes] or list with n_classes length of such objects"
    )
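
    # A minimal usage sketch (illustrative only; synthetic data from
    # scikit-learn's make_classification, default parameters otherwise):
    #
    #   from sklearn.datasets import make_classification
    #   X, y = make_classification(n_samples=100, n_informative=4, n_classes=3)
    #   clf = LGBMClassifier(n_estimators=10).fit(X, y)
    #   proba = clf.predict_proba(X)  # shape = [n_samples, n_classes]
    #   labels = clf.predict(X)       # argmax of proba, mapped to original labels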

    @property
    def classes_(self):
        """:obj:`array` of shape = [n_classes]: The class label array."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No classes found. Need to call fit beforehand.')
        return self._classes

    @property
    def n_classes_(self) -> int:
        """:obj:`int`: The number of classes."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No classes found. Need to call fit beforehand.')
        return self._n_classes


class LGBMRanker(LGBMModel):
    """LightGBM ranker.

    .. warning::

        scikit-learn doesn't support ranking applications yet,
        therefore this class is not really compatible with the sklearn ecosystem.
        Use this class mainly for training and applying ranking models in the usual scikit-learn-like way.
    """

    def fit(
        self,
        X,
        y,
        sample_weight=None,
        init_score=None,
        group=None,
        eval_set=None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight=None,
        eval_init_score=None,
        eval_group=None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        eval_at: Union[List[int], Tuple[int, ...]] = (1, 2, 3, 4, 5),
        feature_name='auto',
        categorical_feature='auto',
        callbacks=None,
        init_model: Optional[Union[str, Path, Booster, LGBMModel]] = None
    ):
        """Docstring is inherited from the LGBMModel."""
        # check group data
        if group is None:
            raise ValueError("group must be set for the ranking task")

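        # every eval set in a ranking task needs its own group structure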
        if eval_set is not None:
            if eval_group is None:
                raise ValueError("eval_group cannot be None when eval_set is not None")
            elif len(eval_group) != len(eval_set):
                raise ValueError("Length of eval_group should be equal to length of eval_set")
            elif (isinstance(eval_group, dict)
                  and any(i not in eval_group or eval_group[i] is None for i in range(len(eval_group)))
                  or isinstance(eval_group, list)
                  and any(group is None for group in eval_group)):
                raise ValueError("Should set group for all eval datasets for ranking task; "
                                 "if you use dict, the index should start from 0")

        self._eval_at = eval_at
        super().fit(
            X,
            y,
            sample_weight=sample_weight,
            init_score=init_score,
            group=group,
            eval_set=eval_set,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_init_score=eval_init_score,
            eval_group=eval_group,
            eval_metric=eval_metric,
            feature_name=feature_name,
            categorical_feature=categorical_feature,
            callbacks=callbacks,
            init_model=init_model
        )
        return self

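    # rewrite the docstring inherited from LGBMModel.fit: drop the
    # classification-only 'eval_class_weight' entry and document 'eval_at'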
    _base_doc = LGBMModel.fit.__doc__.replace("self : LGBMModel", "self : LGBMRanker")  # type: ignore
    fit.__doc__ = (_base_doc[:_base_doc.find('eval_class_weight :')]  # type: ignore
                   + _base_doc[_base_doc.find('eval_init_score :'):])  # type: ignore
    _base_doc = fit.__doc__
    _before_feature_name, _feature_name, _after_feature_name = _base_doc.partition('feature_name :')
    fit.__doc__ = f"""{_before_feature_name}eval_at : list or tuple of int, optional (default=(1, 2, 3, 4, 5))
        The evaluation positions of the specified metric.
    {_feature_name}{_after_feature_name}"""