# coding: utf-8
"""Scikit-learn wrapper interface for LightGBM."""
import copy
from inspect import signature
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import numpy as np

from .basic import (Booster, Dataset, LightGBMError, _choose_param_value, _ConfigAliases, _LGBM_BoosterBestScoreType,
                    _LGBM_CategoricalFeatureConfiguration, _LGBM_EvalFunctionResultType, _LGBM_FeatureNameConfiguration,
                    _log_warning)
from .callback import _EvalResultDict, record_evaluation
from .compat import (SKLEARN_INSTALLED, LGBMNotFittedError, _LGBMAssertAllFinite, _LGBMCheckArray,
                     _LGBMCheckClassificationTargets, _LGBMCheckSampleWeight, _LGBMCheckXY, _LGBMClassifierBase,
                     _LGBMComputeSampleWeight, _LGBMCpuCount, _LGBMLabelEncoder, _LGBMModelBase, _LGBMRegressorBase,
                     dt_DataTable, pd_DataFrame)
from .engine import train

__all__ = [
    'LGBMClassifier',
    'LGBMModel',
    'LGBMRanker',
    'LGBMRegressor',
]

_LGBM_ScikitCustomObjectiveFunction = Union[
    Callable[
        [np.ndarray, np.ndarray],
        Tuple[np.ndarray, np.ndarray]
    ],
    Callable[
        [np.ndarray, np.ndarray, np.ndarray],
        Tuple[np.ndarray, np.ndarray]
    ],
    Callable[
        [np.ndarray, np.ndarray, np.ndarray, np.ndarray],
        Tuple[np.ndarray, np.ndarray]
    ],
]
_LGBM_ScikitCustomEvalFunction = Union[
    Callable[
        [np.ndarray, np.ndarray],
        Union[_LGBM_EvalFunctionResultType, List[_LGBM_EvalFunctionResultType]]
    ],
    Callable[
        [np.ndarray, np.ndarray, np.ndarray],
        Union[_LGBM_EvalFunctionResultType, List[_LGBM_EvalFunctionResultType]]
    ],
    Callable[
        [np.ndarray, np.ndarray, np.ndarray, np.ndarray],
        Union[_LGBM_EvalFunctionResultType, List[_LGBM_EvalFunctionResultType]]
    ],
]
_LGBM_ScikitEvalMetricType = Union[
    str,
    _LGBM_ScikitCustomEvalFunction,
    List[Union[str, _LGBM_ScikitCustomEvalFunction]]
]


class _ObjectiveFunctionWrapper:
    """Proxy class for objective function."""

    def __init__(self, func: _LGBM_ScikitCustomObjectiveFunction):
        """Construct a proxy class.

        This class transforms an objective function to match the ``new_func(preds, dataset)``
        signature expected by ``lightgbm.engine.train``.

        Parameters
        ----------
        func : callable
            Expects a callable with following signatures:
            ``func(y_true, y_pred)``,
            ``func(y_true, y_pred, weight)``
            or ``func(y_true, y_pred, weight, group)``
            and returns (grad, hess):

                y_true : numpy 1-D array of shape = [n_samples]
                    The target values.
                y_pred : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                    The predicted values.
                    Predicted values are returned before any transformation,
                    e.g. they are raw margin instead of probability of positive class for binary task.
                weight : numpy 1-D array of shape = [n_samples]
                    The weight of samples. Weights should be non-negative.
                group : numpy 1-D array
                    Group/query data.
                    Only used in the learning-to-rank task.
                    sum(group) = n_samples.
                    For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
                    where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
                grad : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                    The value of the first order derivative (gradient) of the loss
                    with respect to the elements of y_pred for each sample point.
                hess : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                    The value of the second order derivative (Hessian) of the loss
                    with respect to the elements of y_pred for each sample point.

        .. note::

            For multi-class task, y_pred is a numpy 2-D array of shape = [n_samples, n_classes],
            and grad and hess should be returned in the same format.
        """
        self.func = func

    def __call__(self, preds: np.ndarray, dataset: Dataset) -> Tuple[np.ndarray, np.ndarray]:
        """Call passed function with appropriate arguments.

        Parameters
        ----------
        preds : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The predicted values.
        dataset : Dataset
            The training dataset.

        Returns
        -------
        grad : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The value of the first order derivative (gradient) of the loss
            with respect to the elements of preds for each sample point.
        hess : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The value of the second order derivative (Hessian) of the loss
            with respect to the elements of preds for each sample point.
        """
        labels = dataset.get_label()
        argc = len(signature(self.func).parameters)
        if argc == 2:
            grad, hess = self.func(labels, preds)
        elif argc == 3:
            grad, hess = self.func(labels, preds, dataset.get_weight())
        elif argc == 4:
            grad, hess = self.func(labels, preds, dataset.get_weight(), dataset.get_group())
        else:
            raise TypeError(f"Self-defined objective function should have 2, 3 or 4 arguments, got {argc}")
        return grad, hess
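

# Hedged illustrative sketch, not part of the library API: a minimal
# 2-argument custom objective of the form documented above. Squared error:
# the gradient is (y_pred - y_true) and the Hessian is constant.
def _example_l2_objective(y_true: np.ndarray, y_pred: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    grad = y_pred - y_true       # first derivative of 0.5 * (y_pred - y_true) ** 2
    hess = np.ones_like(y_pred)  # second derivative is 1 for every sample
    return grad, hess
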

class _EvalFunctionWrapper:
    """Proxy class for evaluation function."""

    def __init__(self, func: _LGBM_ScikitCustomEvalFunction):
        """Construct a proxy class.

        This class transforms an evaluation function to match the ``new_func(preds, dataset)``
        signature expected by ``lightgbm.engine.train``.

        Parameters
        ----------
        func : callable
            Expects a callable with following signatures:
            ``func(y_true, y_pred)``,
            ``func(y_true, y_pred, weight)``
            or ``func(y_true, y_pred, weight, group)``
            and returns (eval_name, eval_result, is_higher_better) or
            list of (eval_name, eval_result, is_higher_better):

                y_true : numpy 1-D array of shape = [n_samples]
                    The target values.
                y_pred : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                    The predicted values.
                    In case of custom ``objective``, predicted values are returned before any transformation,
                    e.g. they are raw margin instead of probability of positive class for binary task in this case.
                weight : numpy 1-D array of shape = [n_samples]
                    The weight of samples. Weights should be non-negative.
                group : numpy 1-D array
                    Group/query data.
                    Only used in the learning-to-rank task.
                    sum(group) = n_samples.
                    For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
                    where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
                eval_name : str
                    The name of evaluation function (without whitespace).
                eval_result : float
                    The eval result.
                is_higher_better : bool
                    Is eval result higher better, e.g. AUC is ``is_higher_better``.
        """
        self.func = func

    def __call__(
        self,
        preds: np.ndarray,
        dataset: Dataset
    ) -> Union[_LGBM_EvalFunctionResultType, List[_LGBM_EvalFunctionResultType]]:
        """Call passed function with appropriate arguments.

        Parameters
        ----------
        preds : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The predicted values.
        dataset : Dataset
            The training dataset.

        Returns
        -------
        eval_name : str
            The name of evaluation function (without whitespace).
        eval_result : float
            The eval result.
        is_higher_better : bool
            Is eval result higher better, e.g. AUC is ``is_higher_better``.
        """
        labels = dataset.get_label()
        argc = len(signature(self.func).parameters)
        if argc == 2:
            return self.func(labels, preds)
        elif argc == 3:
            return self.func(labels, preds, dataset.get_weight())
        elif argc == 4:
            return self.func(labels, preds, dataset.get_weight(), dataset.get_group())
        else:
            raise TypeError(f"Self-defined eval function should have 2, 3 or 4 arguments, got {argc}")
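

# Hedged illustrative sketch, not part of the library API: a minimal
# 2-argument custom eval function of the form documented above, returning
# (eval_name, eval_result, is_higher_better).
def _example_rmse_metric(y_true: np.ndarray, y_pred: np.ndarray) -> _LGBM_EvalFunctionResultType:
    return 'custom_rmse', float(np.sqrt(np.mean((y_true - y_pred) ** 2))), False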

# documentation templates for LGBMModel methods are shared between the classes in
# this module and those in the ``dask`` module

_lgbmmodel_doc_fit = (
    """
    Build a gradient boosting model from the training set (X, y).

    Parameters
    ----------
    X : {X_shape}
        Input feature matrix.
    y : {y_shape}
        The target values (class labels in classification, real numbers in regression).
    sample_weight : {sample_weight_shape}
        Weights of training data. Weights should be non-negative.
    init_score : {init_score_shape}
        Init score of training data.
    group : {group_shape}
        Group/query data.
        Only used in the learning-to-rank task.
        sum(group) = n_samples.
        For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
        where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
    eval_set : list or None, optional (default=None)
        A list of (X, y) tuple pairs to use as validation sets.
    eval_names : list of str, or None, optional (default=None)
        Names of eval_set.
    eval_sample_weight : {eval_sample_weight_shape}
        Weights of eval data. Weights should be non-negative.
    eval_class_weight : list or None, optional (default=None)
        Class weights of eval data.
    eval_init_score : {eval_init_score_shape}
        Init score of eval data.
    eval_group : {eval_group_shape}
        Group data of eval data.
    eval_metric : str, callable, list or None, optional (default=None)
        If str, it should be a built-in evaluation metric to use.
        If callable, it should be a custom evaluation metric, see note below for more details.
        If list, it can be a list of built-in metrics, a list of custom evaluation metrics, or a mix of both.
        In either case, the ``metric`` from the model parameters will be evaluated and used as well.
        Default: 'l2' for LGBMRegressor, 'logloss' for LGBMClassifier, 'ndcg' for LGBMRanker.
    feature_name : list of str, or 'auto', optional (default='auto')
        Feature names.
        If 'auto' and data is pandas DataFrame, data column names are used.
    categorical_feature : list of str or int, or 'auto', optional (default='auto')
        Categorical features.
        If list of int, interpreted as indices.
        If list of str, interpreted as feature names (need to specify ``feature_name`` as well).
        If 'auto' and data is pandas DataFrame, pandas unordered categorical columns are used.
        All values in categorical features will be cast to int32 and thus should be less than int32 max value (2147483647).
        Large values could be memory consuming. Consider using consecutive integers starting from zero.
        All negative values in categorical features will be treated as missing values.
        The output cannot be monotonically constrained with respect to a categorical feature.
        Floating point numbers in categorical features will be rounded towards 0.
    callbacks : list of callable, or None, optional (default=None)
        List of callback functions that are applied at each iteration.
        See Callbacks in Python API for more information.
    init_model : str, pathlib.Path, Booster, LGBMModel or None, optional (default=None)
        Filename of LightGBM model, Booster instance or LGBMModel instance used to continue training.

    Returns
    -------
    self : LGBMModel
        Returns self.
    """
)
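

# Hedged sketch of the ``group`` convention described in the template above
# (hypothetical helper, not part of the library): 6 query groups covering a
# 100-document dataset; sizes must sum to n_samples and each query's rows
# must be contiguous.
def _example_group_array(n_samples: int = 100) -> np.ndarray:
    group = np.array([10, 20, 40, 10, 10, 10])
    assert group.sum() == n_samples
    return group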

_lgbmmodel_doc_custom_eval_note = """
    Note
    ----
    Custom eval function expects a callable with following signatures:
    ``func(y_true, y_pred)``, ``func(y_true, y_pred, weight)`` or
    ``func(y_true, y_pred, weight, group)``
    and returns (eval_name, eval_result, is_higher_better) or
    list of (eval_name, eval_result, is_higher_better):

        y_true : numpy 1-D array of shape = [n_samples]
            The target values.
        y_pred : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The predicted values.
            In case of custom ``objective``, predicted values are returned before any transformation,
            e.g. they are raw margin instead of probability of positive class for binary task in this case.
        weight : numpy 1-D array of shape = [n_samples]
            The weight of samples. Weights should be non-negative.
        group : numpy 1-D array
            Group/query data.
            Only used in the learning-to-rank task.
            sum(group) = n_samples.
            For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
            where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
        eval_name : str
            The name of evaluation function (without whitespace).
        eval_result : float
            The eval result.
        is_higher_better : bool
            Is eval result higher better, e.g. AUC is ``is_higher_better``.
"""

_lgbmmodel_doc_predict = (
    """
    {description}

    Parameters
    ----------
    X : {X_shape}
        Input features matrix.
    raw_score : bool, optional (default=False)
        Whether to predict raw scores.
    start_iteration : int, optional (default=0)
        Start index of the iteration to predict.
        If <= 0, starts from the first iteration.
    num_iteration : int or None, optional (default=None)
        Total number of iterations used in the prediction.
        If None, if the best iteration exists and start_iteration <= 0, the best iteration is used;
        otherwise, all iterations from ``start_iteration`` are used (no limits).
        If <= 0, all iterations from ``start_iteration`` are used (no limits).
    pred_leaf : bool, optional (default=False)
        Whether to predict leaf index.
    pred_contrib : bool, optional (default=False)
        Whether to predict feature contributions.

        .. note::

            If you want to get more explanations for your model's predictions using SHAP values,
            like SHAP interaction values,
            you can install the shap package (https://github.com/slundberg/shap).
            Note that unlike the shap package, with ``pred_contrib`` we return a matrix with an extra
            column, where the last column is the expected value.

    validate_features : bool, optional (default=False)
        If True, ensure that the features used to predict match the ones used to train.
        Used only if data is pandas DataFrame.
    **kwargs
        Other parameters for the prediction.

    Returns
    -------
    {output_name} : {predicted_result_shape}
        The predicted values.
    X_leaves : {X_leaves_shape}
        If ``pred_leaf=True``, the predicted leaf of every tree for each sample.
    X_SHAP_values : {X_SHAP_values_shape}
        If ``pred_contrib=True``, the feature contributions for each sample.
    """
)
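

# Hedged usage sketch of the prediction flags documented in the template above
# (hypothetical helper, not part of the library); assumes ``model`` is fitted.
def _example_predict_modes(model: "LGBMModel", X) -> Dict[str, np.ndarray]:
    return {
        'value': model.predict(X),                       # predicted values
        'raw_score': model.predict(X, raw_score=True),   # raw margin scores
        'leaf_index': model.predict(X, pred_leaf=True),  # leaf of every tree per sample
        'contrib': model.predict(X, pred_contrib=True),  # contributions plus expected-value column
    }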


class LGBMModel(_LGBMModelBase):
    """Implementation of the scikit-learn API for LightGBM."""

    def __init__(
        self,
        boosting_type: str = 'gbdt',
        num_leaves: int = 31,
        max_depth: int = -1,
        learning_rate: float = 0.1,
        n_estimators: int = 100,
        subsample_for_bin: int = 200000,
        objective: Optional[Union[str, _LGBM_ScikitCustomObjectiveFunction]] = None,
        class_weight: Optional[Union[Dict, str]] = None,
        min_split_gain: float = 0.,
        min_child_weight: float = 1e-3,
        min_child_samples: int = 20,
        subsample: float = 1.,
        subsample_freq: int = 0,
        colsample_bytree: float = 1.,
        reg_alpha: float = 0.,
        reg_lambda: float = 0.,
        random_state: Optional[Union[int, np.random.RandomState]] = None,
        n_jobs: Optional[int] = None,
        importance_type: str = 'split',
        **kwargs
    ):
        r"""Construct a gradient boosting model.

        Parameters
        ----------
        boosting_type : str, optional (default='gbdt')
            'gbdt', traditional Gradient Boosting Decision Tree.
            'dart', Dropouts meet Multiple Additive Regression Trees.
            'rf', Random Forest.
        num_leaves : int, optional (default=31)
            Maximum tree leaves for base learners.
        max_depth : int, optional (default=-1)
            Maximum tree depth for base learners, <=0 means no limit.
        learning_rate : float, optional (default=0.1)
            Boosting learning rate.
            You can use ``callbacks`` parameter of ``fit`` method to shrink/adapt learning rate
            in training using ``reset_parameter`` callback.
            Note that this will ignore the ``learning_rate`` argument in training.
        n_estimators : int, optional (default=100)
            Number of boosted trees to fit.
        subsample_for_bin : int, optional (default=200000)
            Number of samples for constructing bins.
        objective : str, callable or None, optional (default=None)
            Specify the learning task and the corresponding learning objective or
            a custom objective function to be used (see note below).
            Default: 'regression' for LGBMRegressor, 'binary' or 'multiclass' for LGBMClassifier, 'lambdarank' for LGBMRanker.
        class_weight : dict, 'balanced' or None, optional (default=None)
            Weights associated with classes in the form ``{class_label: weight}``.
            Use this parameter only for multi-class classification task;
            for binary classification task you may use ``is_unbalance`` or ``scale_pos_weight`` parameters.
            Note that the usage of all these parameters will result in poor estimates of the individual class probabilities.
            You may want to consider performing probability calibration
            (https://scikit-learn.org/stable/modules/calibration.html) of your model.
            The 'balanced' mode uses the values of y to automatically adjust weights
            inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))``.
            If None, all classes are supposed to have weight one.
            Note that these weights will be multiplied with ``sample_weight`` (passed through the ``fit`` method)
            if ``sample_weight`` is specified.
        min_split_gain : float, optional (default=0.)
            Minimum loss reduction required to make a further partition on a leaf node of the tree.
        min_child_weight : float, optional (default=1e-3)
            Minimum sum of instance weight (Hessian) needed in a child (leaf).
        min_child_samples : int, optional (default=20)
            Minimum number of data points needed in a child (leaf).
        subsample : float, optional (default=1.)
            Subsample ratio of the training instances.
        subsample_freq : int, optional (default=0)
            Frequency of subsampling; <=0 means disabled.
        colsample_bytree : float, optional (default=1.)
            Subsample ratio of columns when constructing each tree.
        reg_alpha : float, optional (default=0.)
            L1 regularization term on weights.
        reg_lambda : float, optional (default=0.)
            L2 regularization term on weights.
        random_state : int, RandomState object or None, optional (default=None)
            Random number seed.
            If int, this number is used to seed the C++ code.
            If RandomState object (numpy), a random integer is picked based on its state to seed the C++ code.
            If None, default seeds in C++ code are used.
        n_jobs : int or None, optional (default=None)
            Number of parallel threads to use for training (can be changed at prediction time by
            passing it as an extra keyword argument).

            For better performance, it is recommended to set this to the number of physical cores
            in the CPU.

            Negative integers are interpreted as following joblib's formula (n_cpus + 1 + n_jobs), just like
            scikit-learn (so e.g. -1 means using all threads). A value of zero corresponds to the default number of
            threads configured for OpenMP in the system. A value of ``None`` (the default) corresponds
            to using the number of physical cores in the system (its correct detection requires
            either the ``joblib`` or the ``psutil`` util libraries to be installed).
        importance_type : str, optional (default='split')
            The type of feature importance to be filled into ``feature_importances_``.
            If 'split', result contains numbers of times the feature is used in a model.
            If 'gain', result contains total gains of splits which use the feature.
        **kwargs
            Other parameters for the model.
            Check http://lightgbm.readthedocs.io/en/latest/Parameters.html for more parameters.

            .. warning::

                \*\*kwargs is not supported in sklearn; it may cause unexpected issues.

        Note
        ----
        A custom objective function can be provided for the ``objective`` parameter.
        In this case, it should have the signature
        ``objective(y_true, y_pred) -> grad, hess``,
        ``objective(y_true, y_pred, weight) -> grad, hess``
        or ``objective(y_true, y_pred, weight, group) -> grad, hess``:

            y_true : numpy 1-D array of shape = [n_samples]
                The target values.
            y_pred : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                The predicted values.
                Predicted values are returned before any transformation,
                e.g. they are raw margin instead of probability of positive class for binary task.
            weight : numpy 1-D array of shape = [n_samples]
                The weight of samples. Weights should be non-negative.
            group : numpy 1-D array
                Group/query data.
                Only used in the learning-to-rank task.
                sum(group) = n_samples.
                For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
                where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
            grad : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                The value of the first order derivative (gradient) of the loss
                with respect to the elements of y_pred for each sample point.
            hess : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                The value of the second order derivative (Hessian) of the loss
                with respect to the elements of y_pred for each sample point.

        For multi-class task, y_pred is a numpy 2-D array of shape = [n_samples, n_classes],
        and grad and hess should be returned in the same format.
        """
        if not SKLEARN_INSTALLED:
            raise LightGBMError('scikit-learn is required for lightgbm.sklearn. '
                                'You must install scikit-learn and restart your session to use this module.')

        self.boosting_type = boosting_type
        self.objective = objective
        self.num_leaves = num_leaves
        self.max_depth = max_depth
        self.learning_rate = learning_rate
        self.n_estimators = n_estimators
        self.subsample_for_bin = subsample_for_bin
        self.min_split_gain = min_split_gain
        self.min_child_weight = min_child_weight
        self.min_child_samples = min_child_samples
        self.subsample = subsample
        self.subsample_freq = subsample_freq
        self.colsample_bytree = colsample_bytree
        self.reg_alpha = reg_alpha
        self.reg_lambda = reg_lambda
        self.random_state = random_state
        self.n_jobs = n_jobs
        self.importance_type = importance_type
        self._Booster: Optional[Booster] = None
        self._evals_result: _EvalResultDict = {}
        self._best_score: _LGBM_BoosterBestScoreType = {}
        self._best_iteration: int = -1
        self._other_params: Dict[str, Any] = {}
        self._objective = objective
        self.class_weight = class_weight
        self._class_weight: Optional[Union[Dict, str]] = None
        self._class_map: Optional[Dict[int, int]] = None
        self._n_features: int = -1
        self._n_features_in: int = -1
        self._classes: Optional[np.ndarray] = None
        self._n_classes: int = -1
        self.set_params(**kwargs)

    def _more_tags(self) -> Dict[str, Any]:
        return {
            'allow_nan': True,
            'X_types': ['2darray', 'sparse', '1dlabels'],
            '_xfail_checks': {
                'check_no_attributes_set_in_init':
                'scikit-learn incorrectly asserts that private attributes '
                'cannot be set in __init__: '
                '(see https://github.com/microsoft/LightGBM/issues/2628)'
            }
        }

    def __sklearn_is_fitted__(self) -> bool:
        return getattr(self, "fitted_", False)

    def get_params(self, deep: bool = True) -> Dict[str, Any]:
        """Get parameters for this estimator.

        Parameters
        ----------
        deep : bool, optional (default=True)
            If True, will return the parameters for this estimator and
            contained subobjects that are estimators.

        Returns
        -------
        params : dict
            Parameter names mapped to their values.
        """
        params = super().get_params(deep=deep)
        params.update(self._other_params)
        return params

    def set_params(self, **params: Any) -> "LGBMModel":
        """Set the parameters of this estimator.

        Parameters
        ----------
        **params
            Parameter names with their new values.

        Returns
        -------
        self : object
            Returns self.
        """
        for key, value in params.items():
            setattr(self, key, value)
            if hasattr(self, f"_{key}"):
                setattr(self, f"_{key}", value)
            self._other_params[key] = value
        return self

    def _process_params(self, stage: str) -> Dict[str, Any]:
        """Process the parameters of this estimator based on its type, parameter aliases, etc.

        Parameters
        ----------
        stage : str
            Name of the stage (can be ``fit`` or ``predict``) this method is called from.

        Returns
        -------
        processed_params : dict
            Processed parameter names mapped to their values.
        """
        assert stage in {"fit", "predict"}
        params = self.get_params()

        params.pop('objective', None)
        for alias in _ConfigAliases.get('objective'):
            if alias in params:
                obj = params.pop(alias)
                _log_warning(f"Found '{alias}' in params. Will use it instead of 'objective' argument")
                if stage == "fit":
                    self._objective = obj
        if stage == "fit":
            if self._objective is None:
                if isinstance(self, LGBMRegressor):
                    self._objective = "regression"
                elif isinstance(self, LGBMClassifier):
                    if self._n_classes > 2:
                        self._objective = "multiclass"
                    else:
                        self._objective = "binary"
                elif isinstance(self, LGBMRanker):
                    self._objective = "lambdarank"
                else:
                    raise ValueError("Unknown LGBMModel type.")
        if callable(self._objective):
            if stage == "fit":
                params['objective'] = _ObjectiveFunctionWrapper(self._objective)
            else:
                params['objective'] = 'None'
        else:
            params['objective'] = self._objective

        params.pop('importance_type', None)
        params.pop('n_estimators', None)
        params.pop('class_weight', None)

        if isinstance(params['random_state'], np.random.RandomState):
            params['random_state'] = params['random_state'].randint(np.iinfo(np.int32).max)
        if self._n_classes > 2:
            for alias in _ConfigAliases.get('num_class'):
                params.pop(alias, None)
            params['num_class'] = self._n_classes
        if hasattr(self, '_eval_at'):
            eval_at = self._eval_at
            for alias in _ConfigAliases.get('eval_at'):
                if alias in params:
                    _log_warning(f"Found '{alias}' in params. Will use it instead of 'eval_at' argument")
                    eval_at = params.pop(alias)
            params['eval_at'] = eval_at

        # register default metric for consistency with callable eval_metric case
        original_metric = self._objective if isinstance(self._objective, str) else None
        if original_metric is None:
            # try to deduce from class instance
            if isinstance(self, LGBMRegressor):
                original_metric = "l2"
            elif isinstance(self, LGBMClassifier):
                original_metric = "multi_logloss" if self._n_classes > 2 else "binary_logloss"
            elif isinstance(self, LGBMRanker):
                original_metric = "ndcg"

        # overwrite the default metric with any metric set explicitly in params
        params = _choose_param_value("metric", params, original_metric)

        # use joblib conventions for negative n_jobs, just like scikit-learn
        # at predict time, this is handled later due to the order of parameter updates
        if stage == "fit":
            params = _choose_param_value("num_threads", params, self.n_jobs)
            params["num_threads"] = self._process_n_jobs(params["num_threads"])

        return params

    def _process_n_jobs(self, n_jobs: Optional[int]) -> int:
        """Convert special values of n_jobs to their actual values according to the formulas that apply.

        Parameters
        ----------
        n_jobs : int or None
            The original value of n_jobs, potentially having special values such as 'None' or
            negative integers.

        Returns
        -------
        n_jobs : int
            The value of n_jobs with special values converted to actual number of threads.
        """
        if n_jobs is None:
            n_jobs = _LGBMCpuCount(only_physical_cores=True)
        elif n_jobs < 0:
            n_jobs = max(_LGBMCpuCount(only_physical_cores=False) + 1 + n_jobs, 1)
        return n_jobs

    def fit(
        self,
        X,
        y,
        sample_weight=None,
        init_score=None,
        group=None,
        eval_set=None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight=None,
        eval_class_weight=None,
        eval_init_score=None,
        eval_group=None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        feature_name: _LGBM_FeatureNameConfiguration = 'auto',
        categorical_feature: _LGBM_CategoricalFeatureConfiguration = 'auto',
        callbacks: Optional[List[Callable]] = None,
        init_model: Optional[Union[str, Path, Booster, "LGBMModel"]] = None
    ) -> "LGBMModel":
        """Docstring is set after definition, using a template."""
        params = self._process_params(stage="fit")

        # Do not modify original args in fit function
        # Refer to https://github.com/microsoft/LightGBM/pull/2619
        eval_metric_list = copy.deepcopy(eval_metric)
        if not isinstance(eval_metric_list, list):
            eval_metric_list = [eval_metric_list]

        # Separate built-in from callable evaluation metrics
        eval_metrics_callable = [_EvalFunctionWrapper(f) for f in eval_metric_list if callable(f)]
        eval_metrics_builtin = [m for m in eval_metric_list if isinstance(m, str)]

        # concatenate metric from params (or default if not provided in params) and eval_metric
        params['metric'] = [params['metric']] if isinstance(params['metric'], (str, type(None))) else params['metric']
        params['metric'] = [e for e in eval_metrics_builtin if e not in params['metric']] + params['metric']
        params['metric'] = [metric for metric in params['metric'] if metric is not None]

        if not isinstance(X, (pd_DataFrame, dt_DataTable)):
            _X, _y = _LGBMCheckXY(X, y, accept_sparse=True, force_all_finite=False, ensure_min_samples=2)
            if sample_weight is not None:
                sample_weight = _LGBMCheckSampleWeight(sample_weight, _X)
        else:
            _X, _y = X, y

        if self._class_weight is None:
            self._class_weight = self.class_weight
        if self._class_weight is not None:
            class_sample_weight = _LGBMComputeSampleWeight(self._class_weight, y)
            if sample_weight is None or len(sample_weight) == 0:
                sample_weight = class_sample_weight
            else:
                sample_weight = np.multiply(sample_weight, class_sample_weight)

        self._n_features = _X.shape[1]
        # copy for consistency
        self._n_features_in = self._n_features

        train_set = Dataset(data=_X, label=_y, weight=sample_weight, group=group,
                            init_score=init_score, categorical_feature=categorical_feature,
                            params=params)

        valid_sets: List[Dataset] = []
        if eval_set is not None:

            def _get_meta_data(collection, name, i):
                if collection is None:
                    return None
                elif isinstance(collection, list):
                    return collection[i] if len(collection) > i else None
                elif isinstance(collection, dict):
                    return collection.get(i, None)
                else:
                    raise TypeError(f"{name} should be dict or list")

            if isinstance(eval_set, tuple):
                eval_set = [eval_set]
            for i, valid_data in enumerate(eval_set):
                # reuse train_set when the eval set is the training data itself, to reduce cost
                if valid_data[0] is X and valid_data[1] is y:
                    valid_set = train_set
                else:
                    valid_weight = _get_meta_data(eval_sample_weight, 'eval_sample_weight', i)
                    valid_class_weight = _get_meta_data(eval_class_weight, 'eval_class_weight', i)
                    if valid_class_weight is not None:
                        if isinstance(valid_class_weight, dict) and self._class_map is not None:
                            valid_class_weight = {self._class_map[k]: v for k, v in valid_class_weight.items()}
                        valid_class_sample_weight = _LGBMComputeSampleWeight(valid_class_weight, valid_data[1])
                        if valid_weight is None or len(valid_weight) == 0:
                            valid_weight = valid_class_sample_weight
                        else:
                            valid_weight = np.multiply(valid_weight, valid_class_sample_weight)
                    valid_init_score = _get_meta_data(eval_init_score, 'eval_init_score', i)
                    valid_group = _get_meta_data(eval_group, 'eval_group', i)
                    valid_set = Dataset(data=valid_data[0], label=valid_data[1], weight=valid_weight,
                                        group=valid_group, init_score=valid_init_score,
                                        categorical_feature='auto', params=params)

                valid_sets.append(valid_set)

        if isinstance(init_model, LGBMModel):
            init_model = init_model.booster_

        if callbacks is None:
            callbacks = []
        else:
            callbacks = copy.copy(callbacks)  # don't use deepcopy here to allow non-serializable objects

        evals_result: _EvalResultDict = {}
        callbacks.append(record_evaluation(evals_result))

        self._Booster = train(
            params=params,
            train_set=train_set,
            num_boost_round=self.n_estimators,
            valid_sets=valid_sets,
            valid_names=eval_names,
            feval=eval_metrics_callable,
            init_model=init_model,
            feature_name=feature_name,
            callbacks=callbacks
        )

        self._evals_result = evals_result
        self._best_iteration = self._Booster.best_iteration
        self._best_score = self._Booster.best_score

        self.fitted_ = True

        # free dataset
        self._Booster.free_dataset()
        del train_set, valid_sets
        return self

    fit.__doc__ = _lgbmmodel_doc_fit.format(
        X_shape="array-like or sparse matrix of shape = [n_samples, n_features]",
        y_shape="array-like of shape = [n_samples]",
        sample_weight_shape="array-like of shape = [n_samples] or None, optional (default=None)",
        init_score_shape="array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task) or shape = [n_samples, n_classes] (for multi-class task) or None, optional (default=None)",
        group_shape="array-like or None, optional (default=None)",
        eval_sample_weight_shape="list of array, or None, optional (default=None)",
        eval_init_score_shape="list of array, or None, optional (default=None)",
        eval_group_shape="list of array, or None, optional (default=None)"
    ) + "\n\n" + _lgbmmodel_doc_custom_eval_note

    def predict(
        self,
        X,
        raw_score: bool = False,
        start_iteration: int = 0,
        num_iteration: Optional[int] = None,
        pred_leaf: bool = False,
        pred_contrib: bool = False,
        validate_features: bool = False,
        **kwargs: Any
    ):
        """Docstring is set after definition, using a template."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("Estimator not fitted, call fit before exploiting the model.")
        if not isinstance(X, (pd_DataFrame, dt_DataTable)):
            X = _LGBMCheckArray(X, accept_sparse=True, force_all_finite=False)
        n_features = X.shape[1]
        if self._n_features != n_features:
            raise ValueError("Number of features of the model must "
                             f"match the input. Model n_features_ is {self._n_features} and "
                             f"input n_features is {n_features}")
        # retrieve original params that could have been used in both training and prediction
        # and then overwrite them (considering aliases) with params that were passed directly in prediction
        predict_params = self._process_params(stage="predict")
        for alias in _ConfigAliases.get_by_alias(
            "data",
            "X",
            "raw_score",
            "start_iteration",
            "num_iteration",
            "pred_leaf",
            "pred_contrib",
            *kwargs.keys()
        ):
            predict_params.pop(alias, None)
        predict_params.update(kwargs)

        # number of threads can have values with special meaning which is only applied
        # in the scikit-learn interface, these should not reach the c++ side as-is
        predict_params = _choose_param_value("num_threads", predict_params, self.n_jobs)
        predict_params["num_threads"] = self._process_n_jobs(predict_params["num_threads"])

        return self._Booster.predict(  # type: ignore[union-attr]
            X, raw_score=raw_score, start_iteration=start_iteration, num_iteration=num_iteration,
            pred_leaf=pred_leaf, pred_contrib=pred_contrib, validate_features=validate_features,
            **predict_params
        )

    predict.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted value for each sample.",
        X_shape="array-like or sparse matrix of shape = [n_samples, n_features]",
        output_name="predicted_result",
        predicted_result_shape="array-like of shape = [n_samples] or shape = [n_samples, n_classes]",
        X_leaves_shape="array-like of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]",
        X_SHAP_values_shape="array-like of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes] or list with n_classes length of such objects"
    )

    @property
    def n_features_(self) -> int:
        """:obj:`int`: The number of features of fitted model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No n_features found. Need to call fit beforehand.')
        return self._n_features

    @property
    def n_features_in_(self) -> int:
        """:obj:`int`: The number of features of fitted model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No n_features_in found. Need to call fit beforehand.')
        return self._n_features_in

    @property
    def best_score_(self) -> _LGBM_BoosterBestScoreType:
        """:obj:`dict`: The best score of fitted model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No best_score found. Need to call fit beforehand.')
        return self._best_score

    @property
    def best_iteration_(self) -> int:
        """:obj:`int`: The best iteration of fitted model if ``early_stopping()`` callback has been specified."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No best_iteration found. Need to call fit with early_stopping callback beforehand.')
        return self._best_iteration

    @property
    def objective_(self) -> Union[str, _LGBM_ScikitCustomObjectiveFunction]:
        """:obj:`str` or :obj:`callable`: The concrete objective used while fitting this model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No objective found. Need to call fit beforehand.')
        return self._objective

    @property
    def n_estimators_(self) -> int:
        """:obj:`int`: True number of boosting iterations performed.

        This might be less than parameter ``n_estimators`` if early stopping was enabled or
        if boosting stopped early due to limits on complexity like ``min_gain_to_split``.
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No n_estimators found. Need to call fit beforehand.')
        return self._Booster.current_iteration()  # type: ignore

    @property
    def n_iter_(self) -> int:
        """:obj:`int`: True number of boosting iterations performed.

        This might be less than parameter ``n_estimators`` if early stopping was enabled or
        if boosting stopped early due to limits on complexity like ``min_gain_to_split``.
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No n_iter found. Need to call fit beforehand.')
        return self._Booster.current_iteration()  # type: ignore

    @property
    def booster_(self) -> Booster:
        """Booster: The underlying Booster of this model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No booster found. Need to call fit beforehand.')
        return self._Booster  # type: ignore[return-value]

    @property
    def evals_result_(self) -> _EvalResultDict:
        """:obj:`dict`: The evaluation results if validation sets have been specified."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No results found. Need to call fit with eval_set beforehand.')
        return self._evals_result

    @property
    def feature_importances_(self) -> np.ndarray:
        """:obj:`array` of shape = [n_features]: The feature importances (the higher, the more important).

        .. note::

            ``importance_type`` attribute is passed to the function
            to configure the type of importance values to be extracted.
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No feature_importances found. Need to call fit beforehand.')
        return self._Booster.feature_importance(importance_type=self.importance_type)  # type: ignore[union-attr]

    @property
    def feature_name_(self) -> List[str]:
        """:obj:`list` of shape = [n_features]: The names of features."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No feature_name found. Need to call fit beforehand.')
        return self._Booster.feature_name()  # type: ignore[union-attr]


class LGBMRegressor(_LGBMRegressorBase, LGBMModel):
    """LightGBM regressor."""

    def fit(  # type: ignore[override]
        self,
        X,
        y,
        sample_weight=None,
        init_score=None,
        eval_set=None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight=None,
        eval_init_score=None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        feature_name: _LGBM_FeatureNameConfiguration = 'auto',
        categorical_feature: _LGBM_CategoricalFeatureConfiguration = 'auto',
        callbacks: Optional[List[Callable]] = None,
        init_model: Optional[Union[str, Path, Booster, LGBMModel]] = None
    ) -> "LGBMRegressor":
        """Docstring is inherited from the LGBMModel."""
        super().fit(
            X,
            y,
            sample_weight=sample_weight,
            init_score=init_score,
            eval_set=eval_set,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_init_score=eval_init_score,
            eval_metric=eval_metric,
            feature_name=feature_name,
            categorical_feature=categorical_feature,
            callbacks=callbacks,
            init_model=init_model
        )
        return self

    _base_doc = LGBMModel.fit.__doc__.replace("self : LGBMModel", "self : LGBMRegressor")  # type: ignore
    _base_doc = (_base_doc[:_base_doc.find('group :')]  # type: ignore
                 + _base_doc[_base_doc.find('eval_set :'):])  # type: ignore
    _base_doc = (_base_doc[:_base_doc.find('eval_class_weight :')]
                 + _base_doc[_base_doc.find('eval_init_score :'):])
    fit.__doc__ = (_base_doc[:_base_doc.find('eval_group :')]
                   + _base_doc[_base_doc.find('eval_metric :'):])


1037
class LGBMClassifier(_LGBMClassifierBase, LGBMModel):
1038
    """LightGBM classifier."""
wxchan's avatar
wxchan committed
1039

1040
    def fit(  # type: ignore[override]
1041
1042
1043
1044
1045
1046
        self,
        X,
        y,
        sample_weight=None,
        init_score=None,
        eval_set=None,
1047
        eval_names: Optional[List[str]] = None,
1048
1049
1050
        eval_sample_weight=None,
        eval_class_weight=None,
        eval_init_score=None,
1051
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
1052
1053
        feature_name: _LGBM_FeatureNameConfiguration = 'auto',
        categorical_feature: _LGBM_CategoricalFeatureConfiguration = 'auto',
1054
        callbacks: Optional[List[Callable]] = None,
1055
        init_model: Optional[Union[str, Path, Booster, LGBMModel]] = None
1056
    ) -> "LGBMClassifier":
1057
        """Docstring is inherited from the LGBMModel."""
1058
        _LGBMAssertAllFinite(y)
1059
1060
        _LGBMCheckClassificationTargets(y)
        self._le = _LGBMLabelEncoder().fit(y)
1061
        _y = self._le.transform(y)
1062
        self._class_map = dict(zip(self._le.classes_, self._le.transform(self._le.classes_)))
1063
1064
        if isinstance(self.class_weight, dict):
            self._class_weight = {self._class_map[k]: v for k, v in self.class_weight.items()}
1065

1066
        self._classes = self._le.classes_
1067
        self._n_classes = len(self._classes)  # type: ignore[arg-type]
1068

1069
1070
        # adjust eval metrics to match whether binary or multiclass
        # classification is being performed
1071
        if not callable(eval_metric):
1072
1073
1074
1075
1076
1077
            if isinstance(eval_metric, list):
                eval_metric_list = eval_metric
            elif isinstance(eval_metric, str):
                eval_metric_list = [eval_metric]
            else:
                eval_metric_list = []
            if self._n_classes > 2:
                for index, metric in enumerate(eval_metric_list):
                    if metric in {'logloss', 'binary_logloss'}:
                        eval_metric_list[index] = "multi_logloss"
                    elif metric in {'error', 'binary_error'}:
                        eval_metric_list[index] = "multi_error"
            else:
                for index, metric in enumerate(eval_metric_list):
                    if metric in {'logloss', 'multi_logloss'}:
                        eval_metric_list[index] = 'binary_logloss'
                    elif metric in {'error', 'multi_error'}:
                        eval_metric_list[index] = 'binary_error'
            eval_metric = eval_metric_list
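            # e.g. with three classes, eval_metric='logloss' becomes
            # ['multi_logloss'] here, while the same alias resolves to
            # ['binary_logloss'] for a two-class problem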

        # do not modify args, as it causes errors in model selection tools
        valid_sets: Optional[List[Tuple]] = None
        if eval_set is not None:
            if isinstance(eval_set, tuple):
                eval_set = [eval_set]
            valid_sets = []
            for valid_x, valid_y in eval_set:
                if valid_x is X and valid_y is y:
                    valid_sets.append((valid_x, _y))
                else:
                    valid_sets.append((valid_x, self._le.transform(valid_y)))
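            # the identity check ``valid_x is X and valid_y is y`` catches the
            # common ``eval_set=[(X, y)]`` pattern and reuses the already
            # encoded ``_y`` instead of running the label encoder twice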

        super().fit(
            X,
            _y,
            sample_weight=sample_weight,
            init_score=init_score,
            eval_set=valid_sets,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_class_weight=eval_class_weight,
            eval_init_score=eval_init_score,
            eval_metric=eval_metric,
            feature_name=feature_name,
            categorical_feature=categorical_feature,
            callbacks=callbacks,
            init_model=init_model
        )
        return self

    _base_doc = LGBMModel.fit.__doc__.replace("self : LGBMModel", "self : LGBMClassifier")  # type: ignore
    _base_doc = (_base_doc[:_base_doc.find('group :')]  # type: ignore
                 + _base_doc[_base_doc.find('eval_set :'):])  # type: ignore
    fit.__doc__ = (_base_doc[:_base_doc.find('eval_group :')]
                   + _base_doc[_base_doc.find('eval_metric :'):])

    def predict(
        self,
        X,
        raw_score: bool = False,
        start_iteration: int = 0,
        num_iteration: Optional[int] = None,
        pred_leaf: bool = False,
        pred_contrib: bool = False,
        validate_features: bool = False,
        **kwargs: Any
    ):
        """Docstring is inherited from the LGBMModel."""
        result = self.predict_proba(
            X=X,
            raw_score=raw_score,
            start_iteration=start_iteration,
            num_iteration=num_iteration,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            validate_features=validate_features,
            **kwargs
        )
        if callable(self._objective) or raw_score or pred_leaf or pred_contrib:
            return result
        else:
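            # ``result`` holds one probability column per class, so argmax
            # selects the most likely encoded class and the label encoder
            # maps it back to the original label values passed to ``fit``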
            class_index = np.argmax(result, axis=1)
            return self._le.inverse_transform(class_index)

    predict.__doc__ = LGBMModel.predict.__doc__

    def predict_proba(
        self,
        X,
        raw_score: bool = False,
        start_iteration: int = 0,
        num_iteration: Optional[int] = None,
        pred_leaf: bool = False,
        pred_contrib: bool = False,
        validate_features: bool = False,
        **kwargs: Any
    ):
        """Docstring is set after definition, using a template."""
        result = super().predict(
            X=X,
            raw_score=raw_score,
            start_iteration=start_iteration,
            num_iteration=num_iteration,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            validate_features=validate_features,
            **kwargs
        )
        if callable(self._objective) and not (raw_score or pred_leaf or pred_contrib):
            _log_warning("Cannot compute class probabilities or labels "
                         "due to the usage of customized objective function.\n"
                         "Returning raw scores instead.")
            return result
        elif self._n_classes > 2 or raw_score or pred_leaf or pred_contrib:  # type: ignore [operator]
            return result
        else:
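            # for binary classification the Booster returns only the
            # positive-class probability, so the [P(class 0), P(class 1)]
            # matrix is rebuilt by stacking ``1. - result`` against ``result``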
            return np.vstack((1. - result, result)).transpose()

    predict_proba.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted probability for each class for each sample.",
        X_shape="array-like or sparse matrix of shape = [n_samples, n_features]",
        output_name="predicted_probability",
        predicted_result_shape="array-like of shape = [n_samples] or shape = [n_samples, n_classes]",
        X_leaves_shape="array-like of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]",
        X_SHAP_values_shape="array-like of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes] or list with n_classes length of such objects"
    )

    @property
    def classes_(self) -> np.ndarray:
        """:obj:`array` of shape = [n_classes]: The class label array."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No classes found. Need to call fit beforehand.')
        return self._classes  # type: ignore[return-value]

    @property
    def n_classes_(self) -> int:
        """:obj:`int`: The number of classes."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No classes found. Need to call fit beforehand.')
        return self._n_classes

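# A minimal usage sketch for LGBMClassifier (kept as a comment so it is not
# executed on import; the data shapes and n_estimators value below are
# illustrative assumptions, not recommendations):
#
#     import numpy as np
#     from lightgbm import LGBMClassifier
#
#     X = np.random.rand(100, 4)
#     y = np.random.choice(["cat", "dog", "bird"], size=100)
#     clf = LGBMClassifier(n_estimators=20).fit(X, y)
#     clf.predict(X[:5])        # original string labels
#     clf.predict_proba(X[:5])  # shape = [5, 3], one column per class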

class LGBMRanker(LGBMModel):
    """LightGBM ranker.

    .. warning::

        scikit-learn doesn't support ranking applications yet,
        therefore this class is not really compatible with the sklearn ecosystem.
        Please use this class mainly for training and applying ranking models in the common sklearn-style way.
    """

    def fit(  # type: ignore[override]
        self,
        X,
        y,
        sample_weight=None,
        init_score=None,
        group=None,
        eval_set=None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight=None,
        eval_init_score=None,
        eval_group=None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        eval_at: Union[List[int], Tuple[int, ...]] = (1, 2, 3, 4, 5),
        feature_name: _LGBM_FeatureNameConfiguration = 'auto',
        categorical_feature: _LGBM_CategoricalFeatureConfiguration = 'auto',
        callbacks: Optional[List[Callable]] = None,
        init_model: Optional[Union[str, Path, Booster, LGBMModel]] = None
    ) -> "LGBMRanker":
        """Docstring is inherited from the LGBMModel."""
        # check group data
        if group is None:
            raise ValueError("Should set group for ranking task")

        if eval_set is not None:
            if eval_group is None:
                raise ValueError("eval_group cannot be None when eval_set is not None")
            elif len(eval_group) != len(eval_set):
                raise ValueError("Length of eval_group should be equal to length of eval_set")
            elif (isinstance(eval_group, dict)
                  and any(i not in eval_group or eval_group[i] is None for i in range(len(eval_group)))
                  or isinstance(eval_group, list)
                  and any(group is None for group in eval_group)):
                raise ValueError("Should set group for all eval datasets for ranking task; "
                                 "if you use dict, the index should start from 0")

        self._eval_at = eval_at
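        # the positions stored here (e.g. k for ndcg@k) are applied to the
        # ranking metrics computed on the validation sets during training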
        super().fit(
            X,
            y,
            sample_weight=sample_weight,
            init_score=init_score,
            group=group,
            eval_set=eval_set,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_init_score=eval_init_score,
            eval_group=eval_group,
            eval_metric=eval_metric,
            feature_name=feature_name,
            categorical_feature=categorical_feature,
            callbacks=callbacks,
            init_model=init_model
        )
        return self

    _base_doc = LGBMModel.fit.__doc__.replace("self : LGBMModel", "self : LGBMRanker")  # type: ignore
    fit.__doc__ = (_base_doc[:_base_doc.find('eval_class_weight :')]  # type: ignore
                   + _base_doc[_base_doc.find('eval_init_score :'):])  # type: ignore
    _base_doc = fit.__doc__
    _before_feature_name, _feature_name, _after_feature_name = _base_doc.partition('feature_name :')
    fit.__doc__ = f"""{_before_feature_name}eval_at : list or tuple of int, optional (default=(1, 2, 3, 4, 5))
        The evaluation positions of the specified metric.
    {_feature_name}{_after_feature_name}"""
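
# A minimal usage sketch for LGBMRanker (kept as a comment so it is not
# executed on import; sizes, labels and parameters are illustrative
# assumptions):
#
#     import numpy as np
#     from lightgbm import LGBMRanker
#
#     X = np.random.rand(10, 4)
#     y = np.random.randint(0, 4, size=10)  # graded relevance labels
#     group = [6, 4]                        # two queries: 6 + 4 = 10 rows
#     ranker = LGBMRanker(n_estimators=10)
#     ranker.fit(X, y, group=group,
#                eval_set=[(X, y)], eval_group=[group], eval_at=[1, 3])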