# coding: utf-8
"""Scikit-learn wrapper interface for LightGBM."""
import copy
from inspect import signature
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import numpy as np
import scipy.sparse

from .basic import (Booster, Dataset, LightGBMError, _choose_param_value, _ConfigAliases, _LGBM_BoosterBestScoreType,
                    _LGBM_CategoricalFeatureConfiguration, _LGBM_EvalFunctionResultType, _LGBM_FeatureNameConfiguration,
                    _LGBM_GroupType, _LGBM_InitScoreType, _LGBM_LabelType, _LGBM_WeightType, _log_warning)
from .callback import _EvalResultDict, record_evaluation
from .compat import (SKLEARN_INSTALLED, LGBMNotFittedError, _LGBMAssertAllFinite, _LGBMCheckArray,
                     _LGBMCheckClassificationTargets, _LGBMCheckSampleWeight, _LGBMCheckXY, _LGBMClassifierBase,
                     _LGBMComputeSampleWeight, _LGBMCpuCount, _LGBMLabelEncoder, _LGBMModelBase, _LGBMRegressorBase,
                     dt_DataTable, pd_DataFrame)
from .engine import train

__all__ = [
    'LGBMClassifier',
    'LGBMModel',
    'LGBMRanker',
    'LGBMRegressor',
]

_LGBM_ScikitMatrixLike = Union[
    dt_DataTable,
    List[Union[List[float], List[int]]],
    np.ndarray,
    pd_DataFrame,
    scipy.sparse.spmatrix
]
_LGBM_ScikitCustomObjectiveFunction = Union[
    # f(labels, preds)
    Callable[
        [Optional[np.ndarray], np.ndarray],
        Tuple[np.ndarray, np.ndarray]
    ],
    # f(labels, preds, weights)
    Callable[
        [Optional[np.ndarray], np.ndarray, Optional[np.ndarray]],
        Tuple[np.ndarray, np.ndarray]
    ],
    # f(labels, preds, weights, group)
    Callable[
        [Optional[np.ndarray], np.ndarray, Optional[np.ndarray], Optional[np.ndarray]],
        Tuple[np.ndarray, np.ndarray]
    ],
]
_LGBM_ScikitCustomEvalFunction = Union[
    # f(labels, preds)
    Callable[
        [Optional[np.ndarray], np.ndarray],
        _LGBM_EvalFunctionResultType
    ],
    Callable[
        [Optional[np.ndarray], np.ndarray],
        List[_LGBM_EvalFunctionResultType]
    ],
    # f(labels, preds, weights)
    Callable[
        [Optional[np.ndarray], np.ndarray, Optional[np.ndarray]],
        _LGBM_EvalFunctionResultType
    ],
    Callable[
        [Optional[np.ndarray], np.ndarray, Optional[np.ndarray]],
        List[_LGBM_EvalFunctionResultType]
    ],
    # f(labels, preds, weights, group)
    Callable[
        [Optional[np.ndarray], np.ndarray, Optional[np.ndarray], Optional[np.ndarray]],
        _LGBM_EvalFunctionResultType
    ],
    Callable[
        [Optional[np.ndarray], np.ndarray, Optional[np.ndarray], Optional[np.ndarray]],
        List[_LGBM_EvalFunctionResultType]
    ]
]
_LGBM_ScikitEvalMetricType = Union[
    str,
    _LGBM_ScikitCustomEvalFunction,
    List[Union[str, _LGBM_ScikitCustomEvalFunction]]
]
_LGBM_ScikitValidSet = Tuple[_LGBM_ScikitMatrixLike, _LGBM_LabelType]


class _ObjectiveFunctionWrapper:
    """Proxy class for objective function."""

    def __init__(self, func: _LGBM_ScikitCustomObjectiveFunction):
        """Construct a proxy class.

        This class transforms an objective function into one with the signature ``new_func(preds, dataset)``
        as expected by ``lightgbm.engine.train``.

        Parameters
        ----------
        func : callable
            Expects a callable with following signatures:
            ``func(y_true, y_pred)``,
            ``func(y_true, y_pred, weight)``
            or ``func(y_true, y_pred, weight, group)``
            and returns (grad, hess):

                y_true : numpy 1-D array of shape = [n_samples]
                    The target values.
                y_pred : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                    The predicted values.
                    Predicted values are returned before any transformation,
                    e.g. they are raw margin instead of probability of positive class for binary task.
                weight : numpy 1-D array of shape = [n_samples]
                    The weight of samples. Weights should be non-negative.
                group : numpy 1-D array
                    Group/query data.
                    Only used in the learning-to-rank task.
                    sum(group) = n_samples.
                    For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
                    where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
                grad : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                    The value of the first order derivative (gradient) of the loss
                    with respect to the elements of y_pred for each sample point.
                hess : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                    The value of the second order derivative (Hessian) of the loss
                    with respect to the elements of y_pred for each sample point.

        .. note::

            For multi-class task, y_pred is a numpy 2-D array of shape = [n_samples, n_classes],
            and grad and hess should be returned in the same format.
        """
        self.func = func

    def __call__(self, preds: np.ndarray, dataset: Dataset) -> Tuple[np.ndarray, np.ndarray]:
        """Call passed function with appropriate arguments.

        Parameters
        ----------
        preds : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The predicted values.
        dataset : Dataset
            The training dataset.

        Returns
        -------
        grad : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The value of the first order derivative (gradient) of the loss
            with respect to the elements of preds for each sample point.
        hess : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The value of the second order derivative (Hessian) of the loss
            with respect to the elements of preds for each sample point.
        """
        labels = dataset.get_label()
        argc = len(signature(self.func).parameters)
        if argc == 2:
            grad, hess = self.func(labels, preds)  # type: ignore[call-arg]
        elif argc == 3:
            grad, hess = self.func(labels, preds, dataset.get_weight())  # type: ignore[call-arg]
        elif argc == 4:
            grad, hess = self.func(labels, preds, dataset.get_weight(), dataset.get_group())  # type: ignore[call-arg]
        else:
            raise TypeError(f"Self-defined objective function should have 2, 3 or 4 arguments, got {argc}")
        return grad, hess
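

# Illustrative sketch, not part of LightGBM's public API: a least-squares custom
# objective matching the two-argument signature documented above. Wrapped in
# ``_ObjectiveFunctionWrapper``, it would be called with the dataset's labels and
# the raw predictions, and its (grad, hess) pair forwarded to the booster.
def _example_l2_objective(y_true: np.ndarray, y_pred: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    grad = y_pred - y_true        # first derivative of 0.5 * (y_pred - y_true) ** 2
    hess = np.ones_like(y_pred)   # second derivative is the constant 1
    return grad, hess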


class _EvalFunctionWrapper:
    """Proxy class for evaluation function."""

    def __init__(self, func: _LGBM_ScikitCustomEvalFunction):
        """Construct a proxy class.

        This class transforms an evaluation function into one with the signature ``new_func(preds, dataset)``
        as expected by ``lightgbm.engine.train``.

        Parameters
        ----------
        func : callable
            Expects a callable with following signatures:
            ``func(y_true, y_pred)``,
            ``func(y_true, y_pred, weight)``
            or ``func(y_true, y_pred, weight, group)``
            and returns (eval_name, eval_result, is_higher_better) or
            list of (eval_name, eval_result, is_higher_better):

                y_true : numpy 1-D array of shape = [n_samples]
                    The target values.
                y_pred : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                    The predicted values.
                    In case of custom ``objective``, predicted values are returned before any transformation,
                    e.g. they are raw margin instead of probability of positive class for binary task in this case.
                weight : numpy 1-D array of shape = [n_samples]
                    The weight of samples. Weights should be non-negative.
                group : numpy 1-D array
                    Group/query data.
                    Only used in the learning-to-rank task.
                    sum(group) = n_samples.
                    For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
                    where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
                eval_name : str
                    The name of evaluation function (without whitespace).
                eval_result : float
                    The eval result.
                is_higher_better : bool
                    Whether the eval result is better when higher, e.g. AUC is ``is_higher_better``.
        """
        self.func = func

    def __call__(
        self,
        preds: np.ndarray,
        dataset: Dataset
    ) -> Union[_LGBM_EvalFunctionResultType, List[_LGBM_EvalFunctionResultType]]:
        """Call passed function with appropriate arguments.

        Parameters
        ----------
        preds : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The predicted values.
        dataset : Dataset
            The training dataset.

        Returns
        -------
        eval_name : str
            The name of evaluation function (without whitespace).
        eval_result : float
            The eval result.
        is_higher_better : bool
            Whether the eval result is better when higher, e.g. AUC is ``is_higher_better``.
        """
        labels = dataset.get_label()
        argc = len(signature(self.func).parameters)
        if argc == 2:
            return self.func(labels, preds)  # type: ignore[call-arg]
        elif argc == 3:
            return self.func(labels, preds, dataset.get_weight())  # type: ignore[call-arg]
        elif argc == 4:
            return self.func(labels, preds, dataset.get_weight(), dataset.get_group())  # type: ignore[call-arg]
        else:
            raise TypeError(f"Self-defined eval function should have 2, 3 or 4 arguments, got {argc}")
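

# Illustrative sketch, not part of LightGBM's public API: a custom evaluation
# metric matching the two-argument signature documented above. Wrapped in
# ``_EvalFunctionWrapper``, its (eval_name, eval_result, is_higher_better) result
# is passed through to ``lightgbm.engine.train``.
def _example_mae_metric(y_true: np.ndarray, y_pred: np.ndarray) -> Tuple[str, float, bool]:
    # a lower MAE is better, hence is_higher_better=False
    return 'mae', float(np.mean(np.abs(y_true - y_pred))), False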


# documentation templates for LGBMModel methods are shared between the classes in
# this module and those in the ``dask`` module

_lgbmmodel_doc_fit = (
    """
    Build a gradient boosting model from the training set (X, y).

    Parameters
    ----------
    X : {X_shape}
        Input feature matrix.
    y : {y_shape}
        The target values (class labels in classification, real numbers in regression).
    sample_weight : {sample_weight_shape}
        Weights of training data. Weights should be non-negative.
    init_score : {init_score_shape}
        Init score of training data.
    group : {group_shape}
        Group/query data.
        Only used in the learning-to-rank task.
        sum(group) = n_samples.
        For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
        where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
    eval_set : list or None, optional (default=None)
        A list of (X, y) tuple pairs to use as validation sets.
    eval_names : list of str, or None, optional (default=None)
        Names of eval_set.
    eval_sample_weight : {eval_sample_weight_shape}
        Weights of eval data. Weights should be non-negative.
    eval_class_weight : list or None, optional (default=None)
        Class weights of eval data.
    eval_init_score : {eval_init_score_shape}
        Init score of eval data.
    eval_group : {eval_group_shape}
        Group data of eval data.
    eval_metric : str, callable, list or None, optional (default=None)
        If str, it should be a built-in evaluation metric to use.
        If callable, it should be a custom evaluation metric, see note below for more details.
        If list, it can be a list of built-in metrics, a list of custom evaluation metrics, or a mix of both.
        In either case, the ``metric`` from the model parameters will be evaluated and used as well.
        Default: 'l2' for LGBMRegressor, 'logloss' for LGBMClassifier, 'ndcg' for LGBMRanker.
    feature_name : list of str, or 'auto', optional (default='auto')
        Feature names.
        If 'auto' and data is pandas DataFrame, data columns names are used.
    categorical_feature : list of str or int, or 'auto', optional (default='auto')
        Categorical features.
        If list of int, interpreted as indices.
        If list of str, interpreted as feature names (need to specify ``feature_name`` as well).
        If 'auto' and data is pandas DataFrame, pandas unordered categorical columns are used.
        All values in categorical features will be cast to int32 and thus should be less than int32 max value (2147483647).
        Large values could be memory consuming. Consider using consecutive integers starting from zero.
        All negative values in categorical features will be treated as missing values.
        The output cannot be monotonically constrained with respect to a categorical feature.
        Floating point numbers in categorical features will be rounded towards 0.
    callbacks : list of callable, or None, optional (default=None)
        List of callback functions that are applied at each iteration.
        See Callbacks in Python API for more information.
    init_model : str, pathlib.Path, Booster, LGBMModel or None, optional (default=None)
        Filename of LightGBM model, Booster instance or LGBMModel instance used to continue training.

    Returns
    -------
    self : LGBMModel
        Returns self.
    """
)

_lgbmmodel_doc_custom_eval_note = """
    Note
    ----
    Custom eval function expects a callable with following signatures:
    ``func(y_true, y_pred)``, ``func(y_true, y_pred, weight)`` or
    ``func(y_true, y_pred, weight, group)``
    and returns (eval_name, eval_result, is_higher_better) or
    list of (eval_name, eval_result, is_higher_better):

        y_true : numpy 1-D array of shape = [n_samples]
            The target values.
        y_pred : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The predicted values.
            In case of custom ``objective``, predicted values are returned before any transformation,
            e.g. they are raw margin instead of probability of positive class for binary task in this case.
        weight : numpy 1-D array of shape = [n_samples]
            The weight of samples. Weights should be non-negative.
        group : numpy 1-D array
            Group/query data.
            Only used in the learning-to-rank task.
            sum(group) = n_samples.
            For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
            where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
        eval_name : str
            The name of evaluation function (without whitespace).
        eval_result : float
            The eval result.
        is_higher_better : bool
            Whether the eval result is better when higher, e.g. AUC is ``is_higher_better``.
"""

_lgbmmodel_doc_predict = (
    """
    {description}

    Parameters
    ----------
    X : {X_shape}
        Input feature matrix.
    raw_score : bool, optional (default=False)
        Whether to predict raw scores.
    start_iteration : int, optional (default=0)
        Start index of the iteration to predict.
        If <= 0, starts from the first iteration.
    num_iteration : int or None, optional (default=None)
        Total number of iterations used in the prediction.
        If None, if the best iteration exists and start_iteration <= 0, the best iteration is used;
        otherwise, all iterations from ``start_iteration`` are used (no limits).
        If <= 0, all iterations from ``start_iteration`` are used (no limits).
    pred_leaf : bool, optional (default=False)
        Whether to predict leaf index.
    pred_contrib : bool, optional (default=False)
        Whether to predict feature contributions.

        .. note::

            If you want to get more explanations for your model's predictions using SHAP values,
            like SHAP interaction values,
            you can install the shap package (https://github.com/slundberg/shap).
            Note that unlike the shap package, with ``pred_contrib`` we return a matrix with an extra
            column, where the last column is the expected value.

    validate_features : bool, optional (default=False)
        If True, ensure that the features used to predict match the ones used to train.
        Used only if data is pandas DataFrame.
    **kwargs
        Other parameters for the prediction.

    Returns
    -------
    {output_name} : {predicted_result_shape}
        The predicted values.
    X_leaves : {X_leaves_shape}
        If ``pred_leaf=True``, the predicted leaf of every tree for each sample.
    X_SHAP_values : {X_SHAP_values_shape}
        If ``pred_contrib=True``, the feature contributions for each sample.
    """
)
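
# A minimal usage sketch for the ``predict`` signature documented above;
# ``model`` is assumed to be a fitted estimator and ``X_test`` a placeholder:
#
#     preds = model.predict(X_test)                        # shape = [n_samples]
#     contribs = model.predict(X_test, pred_contrib=True)  # extra expected-value column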


class LGBMModel(_LGBMModelBase):
    """Implementation of the scikit-learn API for LightGBM."""

    def __init__(
        self,
        boosting_type: str = 'gbdt',
        num_leaves: int = 31,
        max_depth: int = -1,
        learning_rate: float = 0.1,
        n_estimators: int = 100,
        subsample_for_bin: int = 200000,
        objective: Optional[Union[str, _LGBM_ScikitCustomObjectiveFunction]] = None,
        class_weight: Optional[Union[Dict, str]] = None,
        min_split_gain: float = 0.,
        min_child_weight: float = 1e-3,
        min_child_samples: int = 20,
        subsample: float = 1.,
        subsample_freq: int = 0,
        colsample_bytree: float = 1.,
        reg_alpha: float = 0.,
        reg_lambda: float = 0.,
        random_state: Optional[Union[int, np.random.RandomState]] = None,
        n_jobs: Optional[int] = None,
        importance_type: str = 'split',
        **kwargs
    ):
        r"""Construct a gradient boosting model.

        Parameters
        ----------
        boosting_type : str, optional (default='gbdt')
            'gbdt', traditional Gradient Boosting Decision Tree.
            'dart', Dropouts meet Multiple Additive Regression Trees.
            'rf', Random Forest.
        num_leaves : int, optional (default=31)
            Maximum tree leaves for base learners.
        max_depth : int, optional (default=-1)
            Maximum tree depth for base learners, <=0 means no limit.
        learning_rate : float, optional (default=0.1)
            Boosting learning rate.
            You can use ``callbacks`` parameter of ``fit`` method to shrink/adapt learning rate
            in training using ``reset_parameter`` callback.
            Note that this will ignore the ``learning_rate`` argument in training.
        n_estimators : int, optional (default=100)
            Number of boosted trees to fit.
        subsample_for_bin : int, optional (default=200000)
            Number of samples for constructing bins.
        objective : str, callable or None, optional (default=None)
            Specify the learning task and the corresponding learning objective or
            a custom objective function to be used (see note below).
            Default: 'regression' for LGBMRegressor, 'binary' or 'multiclass' for LGBMClassifier, 'lambdarank' for LGBMRanker.
        class_weight : dict, 'balanced' or None, optional (default=None)
            Weights associated with classes in the form ``{class_label: weight}``.
            Use this parameter only for multi-class classification task;
            for binary classification task you may use ``is_unbalance`` or ``scale_pos_weight`` parameters.
            Note that the usage of all these parameters will result in poor estimates of the individual class probabilities.
            You may want to consider performing probability calibration
            (https://scikit-learn.org/stable/modules/calibration.html) of your model.
            The 'balanced' mode uses the values of y to automatically adjust weights
            inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))``.
            If None, all classes are supposed to have weight one.
            Note that these weights will be multiplied with ``sample_weight`` (passed through the ``fit`` method)
            if ``sample_weight`` is specified.
        min_split_gain : float, optional (default=0.)
            Minimum loss reduction required to make a further partition on a leaf node of the tree.
        min_child_weight : float, optional (default=1e-3)
            Minimum sum of instance weight (Hessian) needed in a child (leaf).
        min_child_samples : int, optional (default=20)
            Minimum number of data needed in a child (leaf).
        subsample : float, optional (default=1.)
            Subsample ratio of the training instance.
        subsample_freq : int, optional (default=0)
            Frequency of subsample, <=0 means disabled.
        colsample_bytree : float, optional (default=1.)
            Subsample ratio of columns when constructing each tree.
        reg_alpha : float, optional (default=0.)
            L1 regularization term on weights.
        reg_lambda : float, optional (default=0.)
            L2 regularization term on weights.
        random_state : int, RandomState object or None, optional (default=None)
            Random number seed.
            If int, this number is used to seed the C++ code.
            If RandomState object (numpy), a random integer is picked based on its state to seed the C++ code.
            If None, default seeds in C++ code are used.
        n_jobs : int or None, optional (default=None)
            Number of parallel threads to use for training (can be changed at prediction time by
            passing it as an extra keyword argument).

            For better performance, it is recommended to set this to the number of physical cores
            in the CPU.

            Negative integers are interpreted as following joblib's formula (n_cpus + 1 + n_jobs), just like
            scikit-learn (so e.g. -1 means using all threads). A value of zero corresponds to the default number of
            threads configured for OpenMP in the system. A value of ``None`` (the default) corresponds
            to using the number of physical cores in the system (its correct detection requires
            either the ``joblib`` or the ``psutil`` util libraries to be installed).

            .. versionchanged:: 4.0.0

        importance_type : str, optional (default='split')
            The type of feature importance to be filled into ``feature_importances_``.
            If 'split', result contains numbers of times the feature is used in a model.
            If 'gain', result contains total gains of splits which use the feature.
        **kwargs
            Other parameters for the model.
            Check http://lightgbm.readthedocs.io/en/latest/Parameters.html for more parameters.

            .. warning::

                \*\*kwargs is not supported in sklearn, so it may cause unexpected issues.

        Note
        ----
        A custom objective function can be provided for the ``objective`` parameter.
        In this case, it should have the signature
        ``objective(y_true, y_pred) -> grad, hess``,
        ``objective(y_true, y_pred, weight) -> grad, hess``
        or ``objective(y_true, y_pred, weight, group) -> grad, hess``:

            y_true : numpy 1-D array of shape = [n_samples]
                The target values.
            y_pred : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                The predicted values.
                Predicted values are returned before any transformation,
                e.g. they are raw margin instead of probability of positive class for binary task.
            weight : numpy 1-D array of shape = [n_samples]
                The weight of samples. Weights should be non-negative.
            group : numpy 1-D array
                Group/query data.
                Only used in the learning-to-rank task.
                sum(group) = n_samples.
                For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
                where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
            grad : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                The value of the first order derivative (gradient) of the loss
                with respect to the elements of y_pred for each sample point.
            hess : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                The value of the second order derivative (Hessian) of the loss
                with respect to the elements of y_pred for each sample point.

        For multi-class task, y_pred is a numpy 2-D array of shape = [n_samples, n_classes],
        and grad and hess should be returned in the same format.
        """
        if not SKLEARN_INSTALLED:
            raise LightGBMError('scikit-learn is required for lightgbm.sklearn. '
                                'You must install scikit-learn and restart your session to use this module.')

        self.boosting_type = boosting_type
        self.objective = objective
        self.num_leaves = num_leaves
        self.max_depth = max_depth
        self.learning_rate = learning_rate
        self.n_estimators = n_estimators
        self.subsample_for_bin = subsample_for_bin
        self.min_split_gain = min_split_gain
        self.min_child_weight = min_child_weight
        self.min_child_samples = min_child_samples
        self.subsample = subsample
        self.subsample_freq = subsample_freq
        self.colsample_bytree = colsample_bytree
        self.reg_alpha = reg_alpha
        self.reg_lambda = reg_lambda
        self.random_state = random_state
        self.n_jobs = n_jobs
        self.importance_type = importance_type
        self._Booster: Optional[Booster] = None
        self._evals_result: _EvalResultDict = {}
        self._best_score: _LGBM_BoosterBestScoreType = {}
        self._best_iteration: int = -1
        self._other_params: Dict[str, Any] = {}
        self._objective = objective
        self.class_weight = class_weight
        self._class_weight: Optional[Union[Dict, str]] = None
        self._class_map: Optional[Dict[int, int]] = None
        self._n_features: int = -1
        self._n_features_in: int = -1
        self._classes: Optional[np.ndarray] = None
        self._n_classes: int = -1
        self.set_params(**kwargs)

    def _more_tags(self) -> Dict[str, Any]:
        return {
            'allow_nan': True,
            'X_types': ['2darray', 'sparse', '1dlabels'],
            '_xfail_checks': {
                'check_no_attributes_set_in_init':
                'scikit-learn incorrectly asserts that private attributes '
                'cannot be set in __init__: '
                '(see https://github.com/microsoft/LightGBM/issues/2628)'
            }
        }

    def __sklearn_is_fitted__(self) -> bool:
        return getattr(self, "fitted_", False)

    def get_params(self, deep: bool = True) -> Dict[str, Any]:
        """Get parameters for this estimator.

        Parameters
        ----------
        deep : bool, optional (default=True)
            If True, will return the parameters for this estimator and
            contained subobjects that are estimators.

        Returns
        -------
        params : dict
            Parameter names mapped to their values.
        """
        params = super().get_params(deep=deep)
        params.update(self._other_params)
        return params

    def set_params(self, **params: Any) -> "LGBMModel":
        """Set the parameters of this estimator.

        Parameters
        ----------
        **params
            Parameter names with their new values.

        Returns
        -------
        self : object
            Returns self.
        """
        for key, value in params.items():
            setattr(self, key, value)
            if hasattr(self, f"_{key}"):
                setattr(self, f"_{key}", value)
            self._other_params[key] = value
        return self
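
    # Behaviour sketch of the parameter handling above, with a placeholder name:
    # ``model.set_params(bagging_fraction=0.8)`` stores the value both as an
    # instance attribute and in ``self._other_params``, so ``get_params()``
    # reports it alongside the constructor arguments.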

    def _process_params(self, stage: str) -> Dict[str, Any]:
        """Process the parameters of this estimator based on its type, parameter aliases, etc.

        Parameters
        ----------
        stage : str
            Name of the stage (can be ``fit`` or ``predict``) this method is called from.

        Returns
        -------
        processed_params : dict
            Processed parameter names mapped to their values.
        """
        assert stage in {"fit", "predict"}
        params = self.get_params()

        params.pop('objective', None)
        for alias in _ConfigAliases.get('objective'):
            if alias in params:
                obj = params.pop(alias)
                _log_warning(f"Found '{alias}' in params. Will use it instead of 'objective' argument")
                if stage == "fit":
                    self._objective = obj
        if stage == "fit":
            if self._objective is None:
                if isinstance(self, LGBMRegressor):
                    self._objective = "regression"
                elif isinstance(self, LGBMClassifier):
                    if self._n_classes > 2:
                        self._objective = "multiclass"
                    else:
                        self._objective = "binary"
                elif isinstance(self, LGBMRanker):
                    self._objective = "lambdarank"
                else:
                    raise ValueError("Unknown LGBMModel type.")
        if callable(self._objective):
            if stage == "fit":
                params['objective'] = _ObjectiveFunctionWrapper(self._objective)
            else:
                params['objective'] = 'None'
        else:
            params['objective'] = self._objective

        params.pop('importance_type', None)
        params.pop('n_estimators', None)
        params.pop('class_weight', None)

        if isinstance(params['random_state'], np.random.RandomState):
            params['random_state'] = params['random_state'].randint(np.iinfo(np.int32).max)
        if self._n_classes > 2:
            for alias in _ConfigAliases.get('num_class'):
                params.pop(alias, None)
            params['num_class'] = self._n_classes
        if hasattr(self, '_eval_at'):
            eval_at = self._eval_at
            for alias in _ConfigAliases.get('eval_at'):
                if alias in params:
                    _log_warning(f"Found '{alias}' in params. Will use it instead of 'eval_at' argument")
                    eval_at = params.pop(alias)
            params['eval_at'] = eval_at

        # register default metric for consistency with callable eval_metric case
        original_metric = self._objective if isinstance(self._objective, str) else None
        if original_metric is None:
            # try to deduce from class instance
            if isinstance(self, LGBMRegressor):
                original_metric = "l2"
            elif isinstance(self, LGBMClassifier):
                original_metric = "multi_logloss" if self._n_classes > 2 else "binary_logloss"
            elif isinstance(self, LGBMRanker):
                original_metric = "ndcg"

        # overwrite default metric by explicitly set metric
        params = _choose_param_value("metric", params, original_metric)

        # use joblib conventions for negative n_jobs, just like scikit-learn
        # at predict time, this is handled later due to the order of parameter updates
        if stage == "fit":
            params = _choose_param_value("num_threads", params, self.n_jobs)
            params["num_threads"] = self._process_n_jobs(params["num_threads"])

        return params

    def _process_n_jobs(self, n_jobs: Optional[int]) -> int:
        """Convert special values of n_jobs to their actual values according to the formulas that apply.

        Parameters
        ----------
        n_jobs : int or None
            The original value of n_jobs, potentially having special values such as 'None' or
            negative integers.

        Returns
        -------
        n_jobs : int
            The value of n_jobs with special values converted to actual number of threads.
        """
        if n_jobs is None:
            n_jobs = _LGBMCpuCount(only_physical_cores=True)
        elif n_jobs < 0:
            n_jobs = max(_LGBMCpuCount(only_physical_cores=False) + 1 + n_jobs, 1)
        return n_jobs
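
    # Worked examples of the conversion above, assuming a machine with 8 logical
    # CPUs (negative values follow joblib's n_cpus + 1 + n_jobs formula):
    #   n_jobs=None -> number of physical cores (detected via joblib or psutil)
    #   n_jobs=-1   -> 8 + 1 - 1 = 8 (all logical threads)
    #   n_jobs=-2   -> 8 + 1 - 2 = 7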

    def fit(
        self,
        X: _LGBM_ScikitMatrixLike,
        y: _LGBM_LabelType,
        sample_weight: Optional[_LGBM_WeightType] = None,
        init_score: Optional[_LGBM_InitScoreType] = None,
        group: Optional[_LGBM_GroupType] = None,
        eval_set: Optional[List[_LGBM_ScikitValidSet]] = None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight: Optional[List[_LGBM_WeightType]] = None,
        eval_class_weight: Optional[List[float]] = None,
        eval_init_score: Optional[List[_LGBM_InitScoreType]] = None,
        eval_group: Optional[List[_LGBM_GroupType]] = None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        feature_name: _LGBM_FeatureNameConfiguration = 'auto',
        categorical_feature: _LGBM_CategoricalFeatureConfiguration = 'auto',
        callbacks: Optional[List[Callable]] = None,
        init_model: Optional[Union[str, Path, Booster, "LGBMModel"]] = None
    ) -> "LGBMModel":
        """Docstring is set after definition, using a template."""
        params = self._process_params(stage="fit")

        # Do not modify original args in fit function
        # Refer to https://github.com/microsoft/LightGBM/pull/2619
        eval_metric_list: List[Union[str, _LGBM_ScikitCustomEvalFunction]]
        if eval_metric is None:
            eval_metric_list = []
        elif isinstance(eval_metric, list):
            eval_metric_list = copy.deepcopy(eval_metric)
        else:
            eval_metric_list = [copy.deepcopy(eval_metric)]

        # Separate built-in from callable evaluation metrics
        eval_metrics_callable = [_EvalFunctionWrapper(f) for f in eval_metric_list if callable(f)]
        eval_metrics_builtin = [m for m in eval_metric_list if isinstance(m, str)]

        # concatenate metric from params (or default if not provided in params) and eval_metric
        params['metric'] = [params['metric']] if isinstance(params['metric'], (str, type(None))) else params['metric']
        params['metric'] = [e for e in eval_metrics_builtin if e not in params['metric']] + params['metric']
        params['metric'] = [metric for metric in params['metric'] if metric is not None]
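        # e.g. if params['metric'] resolved to 'l2' and eval_metric contained 'l1',
        # the list here becomes ['l1', 'l2']; duplicates and None entries are dropped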

        if not isinstance(X, (pd_DataFrame, dt_DataTable)):
            _X, _y = _LGBMCheckXY(X, y, accept_sparse=True, force_all_finite=False, ensure_min_samples=2)
            if sample_weight is not None:
                sample_weight = _LGBMCheckSampleWeight(sample_weight, _X)
        else:
            _X, _y = X, y

        if self._class_weight is None:
            self._class_weight = self.class_weight
        if self._class_weight is not None:
            class_sample_weight = _LGBMComputeSampleWeight(self._class_weight, y)
            if sample_weight is None or len(sample_weight) == 0:
                sample_weight = class_sample_weight
            else:
                sample_weight = np.multiply(sample_weight, class_sample_weight)

        self._n_features = _X.shape[1]
        # copy for consistency
        self._n_features_in = self._n_features

        train_set = Dataset(data=_X, label=_y, weight=sample_weight, group=group,
                            init_score=init_score, categorical_feature=categorical_feature,
                            params=params)

        valid_sets: List[Dataset] = []
        if eval_set is not None:

            def _get_meta_data(collection, name, i):
                if collection is None:
                    return None
                elif isinstance(collection, list):
                    return collection[i] if len(collection) > i else None
                elif isinstance(collection, dict):
                    return collection.get(i, None)
                else:
                    raise TypeError(f"{name} should be dict or list")

            if isinstance(eval_set, tuple):
                eval_set = [eval_set]
            for i, valid_data in enumerate(eval_set):
                # reuse train_set when the validation data is the training data, to reduce cost
                if valid_data[0] is X and valid_data[1] is y:
                    valid_set = train_set
                else:
                    valid_weight = _get_meta_data(eval_sample_weight, 'eval_sample_weight', i)
                    valid_class_weight = _get_meta_data(eval_class_weight, 'eval_class_weight', i)
                    if valid_class_weight is not None:
                        if isinstance(valid_class_weight, dict) and self._class_map is not None:
                            valid_class_weight = {self._class_map[k]: v for k, v in valid_class_weight.items()}
                        valid_class_sample_weight = _LGBMComputeSampleWeight(valid_class_weight, valid_data[1])
                        if valid_weight is None or len(valid_weight) == 0:
                            valid_weight = valid_class_sample_weight
                        else:
                            valid_weight = np.multiply(valid_weight, valid_class_sample_weight)
                    valid_init_score = _get_meta_data(eval_init_score, 'eval_init_score', i)
                    valid_group = _get_meta_data(eval_group, 'eval_group', i)
                    valid_set = Dataset(data=valid_data[0], label=valid_data[1], weight=valid_weight,
                                        group=valid_group, init_score=valid_init_score,
                                        categorical_feature='auto', params=params)

                valid_sets.append(valid_set)

        if isinstance(init_model, LGBMModel):
            init_model = init_model.booster_

        if callbacks is None:
            callbacks = []
        else:
            callbacks = copy.copy(callbacks)  # don't use deepcopy here to allow non-serializable objects

        evals_result: _EvalResultDict = {}
        callbacks.append(record_evaluation(evals_result))

        self._Booster = train(
            params=params,
            train_set=train_set,
            num_boost_round=self.n_estimators,
            valid_sets=valid_sets,
            valid_names=eval_names,
            feval=eval_metrics_callable,  # type: ignore[arg-type]
            init_model=init_model,
            feature_name=feature_name,
            callbacks=callbacks
        )

        self._evals_result = evals_result
        self._best_iteration = self._Booster.best_iteration
        self._best_score = self._Booster.best_score

        self.fitted_ = True

        # free dataset
        self._Booster.free_dataset()
        del train_set, valid_sets
        return self

    fit.__doc__ = _lgbmmodel_doc_fit.format(
        X_shape="numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse, list of lists of int or float of shape = [n_samples, n_features]",
        y_shape="numpy array, pandas DataFrame, pandas Series, list of int or float of shape = [n_samples]",
        sample_weight_shape="numpy array, pandas Series, list of int or float of shape = [n_samples] or None, optional (default=None)",
        init_score_shape="numpy array, pandas DataFrame, pandas Series, list of int or float of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task) or shape = [n_samples, n_classes] (for multi-class task) or None, optional (default=None)",
        group_shape="numpy array, pandas Series, list of int or float, or None, optional (default=None)",
        eval_sample_weight_shape="list of array (same types as ``sample_weight`` supports), or None, optional (default=None)",
        eval_init_score_shape="list of array (same types as ``init_score`` supports), or None, optional (default=None)",
        eval_group_shape="list of array (same types as ``group`` supports), or None, optional (default=None)"
    ) + "\n\n" + _lgbmmodel_doc_custom_eval_note

    def predict(
        self,
        X: _LGBM_ScikitMatrixLike,
        raw_score: bool = False,
        start_iteration: int = 0,
        num_iteration: Optional[int] = None,
        pred_leaf: bool = False,
        pred_contrib: bool = False,
        validate_features: bool = False,
        **kwargs: Any
    ):
        """Docstring is set after definition, using a template."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("Estimator not fitted, call fit before exploiting the model.")
        if not isinstance(X, (pd_DataFrame, dt_DataTable)):
            X = _LGBMCheckArray(X, accept_sparse=True, force_all_finite=False)
        n_features = X.shape[1]
        if self._n_features != n_features:
            raise ValueError("Number of features of the model must "
                             f"match the input. Model n_features_ is {self._n_features} and "
                             f"input n_features is {n_features}")
        # retrieve original params that possibly can be used in both training and prediction
        # and then overwrite them (considering aliases) with params that were passed directly in prediction
        predict_params = self._process_params(stage="predict")
        for alias in _ConfigAliases.get_by_alias(
            "data",
            "X",
            "raw_score",
            "start_iteration",
            "num_iteration",
            "pred_leaf",
            "pred_contrib",
            *kwargs.keys()
        ):
            predict_params.pop(alias, None)
        predict_params.update(kwargs)

        # number of threads can have values with special meaning which is only applied
        # in the scikit-learn interface, these should not reach the c++ side as-is
        predict_params = _choose_param_value("num_threads", predict_params, self.n_jobs)
        predict_params["num_threads"] = self._process_n_jobs(predict_params["num_threads"])

        return self._Booster.predict(  # type: ignore[union-attr]
            X, raw_score=raw_score, start_iteration=start_iteration, num_iteration=num_iteration,
            pred_leaf=pred_leaf, pred_contrib=pred_contrib, validate_features=validate_features,
            **predict_params
        )

    predict.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted value for each sample.",
        X_shape="numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse, list of lists of int or float of shape = [n_samples, n_features]",
        output_name="predicted_result",
        predicted_result_shape="array-like of shape = [n_samples] or shape = [n_samples, n_classes]",
        X_leaves_shape="array-like of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]",
        X_SHAP_values_shape="array-like of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes] or list with n_classes length of such objects"
    )

    @property
    def n_features_(self) -> int:
        """:obj:`int`: The number of features of fitted model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No n_features found. Need to call fit beforehand.')
        return self._n_features

    @property
    def n_features_in_(self) -> int:
        """:obj:`int`: The number of features of fitted model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No n_features_in found. Need to call fit beforehand.')
        return self._n_features_in

    @property
    def best_score_(self) -> _LGBM_BoosterBestScoreType:
        """:obj:`dict`: The best score of fitted model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No best_score found. Need to call fit beforehand.')
        return self._best_score

    @property
    def best_iteration_(self) -> int:
        """:obj:`int`: The best iteration of fitted model if ``early_stopping()`` callback has been specified."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No best_iteration found. Need to call fit with early_stopping callback beforehand.')
        return self._best_iteration

    @property
    def objective_(self) -> Union[str, _LGBM_ScikitCustomObjectiveFunction]:
        """:obj:`str` or :obj:`callable`: The concrete objective used while fitting this model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No objective found. Need to call fit beforehand.')
        return self._objective  # type: ignore[return-value]

    @property
    def n_estimators_(self) -> int:
        """:obj:`int`: True number of boosting iterations performed.

        This might be less than parameter ``n_estimators`` if early stopping was enabled or
        if boosting stopped early due to limits on complexity like ``min_gain_to_split``.
        .. versionadded:: 4.0.0
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No n_estimators found. Need to call fit beforehand.')
        return self._Booster.current_iteration()  # type: ignore

    @property
    def n_iter_(self) -> int:
        """:obj:`int`: True number of boosting iterations performed.

        This might be less than parameter ``n_estimators`` if early stopping was enabled or
        if boosting stopped early due to limits on complexity like ``min_gain_to_split``.
        .. versionadded:: 4.0.0
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No n_iter found. Need to call fit beforehand.')
        return self._Booster.current_iteration()  # type: ignore

    @property
    def booster_(self) -> Booster:
        """Booster: The underlying Booster of this model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No booster found. Need to call fit beforehand.')
        return self._Booster  # type: ignore[return-value]

    @property
    def evals_result_(self) -> _EvalResultDict:
        """:obj:`dict`: The evaluation results if validation sets have been specified."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No results found. Need to call fit with eval_set beforehand.')
        return self._evals_result

    @property
    def feature_importances_(self) -> np.ndarray:
        """:obj:`array` of shape = [n_features]: The feature importances (the higher, the more important).

        .. note::

            ``importance_type`` attribute is passed to the function
            to configure the type of importance values to be extracted.
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No feature_importances found. Need to call fit beforehand.')
        return self._Booster.feature_importance(importance_type=self.importance_type)  # type: ignore[union-attr]

    @property
    def feature_name_(self) -> List[str]:
        """:obj:`list` of shape = [n_features]: The names of features."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No feature_name found. Need to call fit beforehand.')
        return self._Booster.feature_name()  # type: ignore[union-attr]


class LGBMRegressor(_LGBMRegressorBase, LGBMModel):
    """LightGBM regressor."""

    def fit(  # type: ignore[override]
        self,
        X: _LGBM_ScikitMatrixLike,
        y: _LGBM_LabelType,
        sample_weight: Optional[_LGBM_WeightType] = None,
        init_score: Optional[_LGBM_InitScoreType] = None,
        eval_set: Optional[List[_LGBM_ScikitValidSet]] = None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight: Optional[List[_LGBM_WeightType]] = None,
        eval_init_score: Optional[List[_LGBM_InitScoreType]] = None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        feature_name: _LGBM_FeatureNameConfiguration = 'auto',
        categorical_feature: _LGBM_CategoricalFeatureConfiguration = 'auto',
        callbacks: Optional[List[Callable]] = None,
        init_model: Optional[Union[str, Path, Booster, LGBMModel]] = None
    ) -> "LGBMRegressor":
        """Docstring is inherited from the LGBMModel."""
        super().fit(
            X,
            y,
            sample_weight=sample_weight,
            init_score=init_score,
            eval_set=eval_set,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_init_score=eval_init_score,
            eval_metric=eval_metric,
            feature_name=feature_name,
            categorical_feature=categorical_feature,
            callbacks=callbacks,
            init_model=init_model
        )
        return self

    _base_doc = LGBMModel.fit.__doc__.replace("self : LGBMModel", "self : LGBMRegressor")  # type: ignore
    _base_doc = (_base_doc[:_base_doc.find('group :')]  # type: ignore
                 + _base_doc[_base_doc.find('eval_set :'):])  # type: ignore
    _base_doc = (_base_doc[:_base_doc.find('eval_class_weight :')]
                 + _base_doc[_base_doc.find('eval_init_score :'):])
    fit.__doc__ = (_base_doc[:_base_doc.find('eval_group :')]
                   + _base_doc[_base_doc.find('eval_metric :'):])


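# A minimal usage sketch (hypothetical helper and synthetic data, illustrative
# only and not part of the library's API): fit LGBMRegressor with a validation
# set, then read the fitted attributes exposed by LGBMModel above.
def _demo_lgbm_regressor_usage() -> None:
    rng = np.random.default_rng(0)
    X = rng.normal(size=(100, 4))
    y = X[:, 0] + rng.normal(scale=0.1, size=100)
    reg = LGBMRegressor(n_estimators=10, min_child_samples=5)
    reg.fit(X[:80], y[:80], eval_set=[(X[80:], y[80:])], eval_names=['valid'])
    print(reg.evals_result_['valid']['l2'])  # per-iteration validation L2
    print(reg.feature_name_)                 # auto-generated names: Column_0, ...
    print(reg.feature_importances_)          # split counts under the default importance_type
    print(reg.predict(X[80:])[:5])

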
class LGBMClassifier(_LGBMClassifierBase, LGBMModel):
    """LightGBM classifier."""

    def fit(  # type: ignore[override]
        self,
        X: _LGBM_ScikitMatrixLike,
        y: _LGBM_LabelType,
        sample_weight: Optional[_LGBM_WeightType] = None,
        init_score: Optional[_LGBM_InitScoreType] = None,
        eval_set: Optional[List[_LGBM_ScikitValidSet]] = None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight: Optional[List[_LGBM_WeightType]] = None,
        eval_class_weight: Optional[List[float]] = None,
        eval_init_score: Optional[List[_LGBM_InitScoreType]] = None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        feature_name: _LGBM_FeatureNameConfiguration = 'auto',
        categorical_feature: _LGBM_CategoricalFeatureConfiguration = 'auto',
        callbacks: Optional[List[Callable]] = None,
        init_model: Optional[Union[str, Path, Booster, LGBMModel]] = None
    ) -> "LGBMClassifier":
        """Docstring is inherited from the LGBMModel."""
        _LGBMAssertAllFinite(y)
        _LGBMCheckClassificationTargets(y)
        self._le = _LGBMLabelEncoder().fit(y)
        _y = self._le.transform(y)
        self._class_map = dict(zip(self._le.classes_, self._le.transform(self._le.classes_)))
        # ``class_weight`` is keyed by the original labels; remap the keys to
        # the encoded labels that the booster actually sees
        if isinstance(self.class_weight, dict):
            self._class_weight = {self._class_map[k]: v for k, v in self.class_weight.items()}

        self._classes = self._le.classes_
        self._n_classes = len(self._classes)  # type: ignore[arg-type]
        if self.objective is None:
            self._objective = None

        # adjust eval metrics to match whether binary or multiclass
        # classification is being performed
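        # (e.g. 'logloss' is mapped to 'multi_logloss' when more than two classes
        # are present, and 'multi_error' back to 'binary_error' otherwise)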
        if not callable(eval_metric):
            if isinstance(eval_metric, list):
                # deep-copy so the caller's list is never mutated in place
                eval_metric_list = copy.deepcopy(eval_metric)
            elif isinstance(eval_metric, str):
                eval_metric_list = [eval_metric]
            else:
                eval_metric_list = []
            if self._n_classes > 2:
                for index, metric in enumerate(eval_metric_list):
                    if metric in {'logloss', 'binary_logloss'}:
                        eval_metric_list[index] = 'multi_logloss'
                    elif metric in {'error', 'binary_error'}:
                        eval_metric_list[index] = 'multi_error'
            else:
                for index, metric in enumerate(eval_metric_list):
                    if metric in {'logloss', 'multi_logloss'}:
                        eval_metric_list[index] = 'binary_logloss'
                    elif metric in {'error', 'multi_error'}:
                        eval_metric_list[index] = 'binary_error'
            eval_metric = eval_metric_list

        # do not modify args, as it causes errors in model selection tools
        valid_sets: Optional[List[_LGBM_ScikitValidSet]] = None
        if eval_set is not None:
            if isinstance(eval_set, tuple):
                eval_set = [eval_set]
            valid_sets = []
            for valid_x, valid_y in eval_set:
                # reuse the already-encoded training labels when a validation
                # fold is the training set itself; otherwise encode its labels
                if valid_x is X and valid_y is y:
                    valid_sets.append((valid_x, _y))
                else:
                    valid_sets.append((valid_x, self._le.transform(valid_y)))

        super().fit(
            X,
            _y,
            sample_weight=sample_weight,
            init_score=init_score,
            eval_set=valid_sets,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_class_weight=eval_class_weight,
            eval_init_score=eval_init_score,
            eval_metric=eval_metric,
            feature_name=feature_name,
            categorical_feature=categorical_feature,
            callbacks=callbacks,
            init_model=init_model
        )
        return self

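    # Same docstring surgery as in LGBMRegressor, except that the classification-only
    # ``eval_class_weight`` section is kept here.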
    _base_doc = LGBMModel.fit.__doc__.replace("self : LGBMModel", "self : LGBMClassifier")  # type: ignore
    _base_doc = (_base_doc[:_base_doc.find('group :')]  # type: ignore
                 + _base_doc[_base_doc.find('eval_set :'):])  # type: ignore
    fit.__doc__ = (_base_doc[:_base_doc.find('eval_group :')]
                   + _base_doc[_base_doc.find('eval_metric :'):])

    def predict(
        self,
        X: _LGBM_ScikitMatrixLike,
        raw_score: bool = False,
        start_iteration: int = 0,
        num_iteration: Optional[int] = None,
        pred_leaf: bool = False,
        pred_contrib: bool = False,
        validate_features: bool = False,
        **kwargs: Any
    ):
        """Docstring is inherited from the LGBMModel."""
        result = self.predict_proba(
            X=X,
            raw_score=raw_score,
            start_iteration=start_iteration,
            num_iteration=num_iteration,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            validate_features=validate_features,
            **kwargs
        )
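        # with a custom objective or raw/leaf/contribution output requested, the
        # result is not a probability matrix, so return it unchanged; otherwise
        # take the argmax and map encoded class indices back to original labels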
        if callable(self._objective) or raw_score or pred_leaf or pred_contrib:
            return result
        else:
            class_index = np.argmax(result, axis=1)
            return self._le.inverse_transform(class_index)

    predict.__doc__ = LGBMModel.predict.__doc__

    def predict_proba(
        self,
        X: _LGBM_ScikitMatrixLike,
        raw_score: bool = False,
        start_iteration: int = 0,
        num_iteration: Optional[int] = None,
        pred_leaf: bool = False,
        pred_contrib: bool = False,
        validate_features: bool = False,
        **kwargs: Any
    ):
        """Docstring is set after definition, using a template."""
        result = super().predict(
            X=X,
            raw_score=raw_score,
            start_iteration=start_iteration,
            num_iteration=num_iteration,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            validate_features=validate_features,
            **kwargs
        )
        if callable(self._objective) and not (raw_score or pred_leaf or pred_contrib):
            _log_warning("Cannot compute class probabilities or labels "
                         "due to the usage of customized objective function.\n"
                         "Returning raw scores instead.")
            return result
        elif self._n_classes > 2 or raw_score or pred_leaf or pred_contrib:  # type: ignore [operator]
            return result
        else:
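            # binary task: the booster returns only P(y == 1); stack it with its
            # complement to build the [P(y == 0), P(y == 1)] columns that
            # scikit-learn expects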
            return np.vstack((1. - result, result)).transpose()

    predict_proba.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted probability for each class for each sample.",
        X_shape="numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse, list of lists of int or float of shape = [n_samples, n_features]",
        output_name="predicted_probability",
        predicted_result_shape="array-like of shape = [n_samples] or shape = [n_samples, n_classes]",
        X_leaves_shape="array-like of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]",
        X_SHAP_values_shape="array-like of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes] or a list of n_classes such objects"
    )

    @property
    def classes_(self) -> np.ndarray:
        """:obj:`array` of shape = [n_classes]: The class label array."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No classes found. Need to call fit beforehand.')
        return self._classes  # type: ignore[return-value]

    @property
    def n_classes_(self) -> int:
        """:obj:`int`: The number of classes."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No classes found. Need to call fit beforehand.')
        return self._n_classes


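# A minimal usage sketch of the predict/predict_proba contract above (hypothetical
# helper and synthetic data, illustrative only and not part of the library's API).
def _demo_lgbm_classifier_usage() -> None:
    rng = np.random.default_rng(0)
    X = rng.normal(size=(100, 4))
    y = (X[:, 0] > 0).astype(int)
    clf = LGBMClassifier(n_estimators=10, min_child_samples=5).fit(X, y)
    proba = clf.predict_proba(X)  # shape (100, 2); columns ordered as clf.classes_
    labels = clf.predict(X)       # argmax over proba, mapped back to original labels
    assert proba.shape == (100, 2)
    assert set(labels) <= set(clf.classes_)

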
class LGBMRanker(LGBMModel):
    """LightGBM ranker.

    .. warning::

        scikit-learn doesn't support ranking applications yet,
        therefore this class is not really compatible with the sklearn ecosystem.
        Please use this class mainly for training and applying ranking models in a common sklearn-like way.
    """

    def fit(  # type: ignore[override]
        self,
        X: _LGBM_ScikitMatrixLike,
        y: _LGBM_LabelType,
        sample_weight: Optional[_LGBM_WeightType] = None,
        init_score: Optional[_LGBM_InitScoreType] = None,
        group: Optional[_LGBM_GroupType] = None,
        eval_set: Optional[List[_LGBM_ScikitValidSet]] = None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight: Optional[List[_LGBM_WeightType]] = None,
        eval_init_score: Optional[List[_LGBM_InitScoreType]] = None,
        eval_group: Optional[List[_LGBM_GroupType]] = None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        eval_at: Union[List[int], Tuple[int, ...]] = (1, 2, 3, 4, 5),
        feature_name: _LGBM_FeatureNameConfiguration = 'auto',
        categorical_feature: _LGBM_CategoricalFeatureConfiguration = 'auto',
        callbacks: Optional[List[Callable]] = None,
        init_model: Optional[Union[str, Path, Booster, LGBMModel]] = None
    ) -> "LGBMRanker":
        """Docstring is inherited from the LGBMModel."""
        # check group data
        if group is None:
            raise ValueError("Should set group for ranking task")

        if eval_set is not None:
            if eval_group is None:
                raise ValueError("Eval_group cannot be None when eval_set is not None")
            elif len(eval_group) != len(eval_set):
                raise ValueError("Length of eval_group should be equal to length of eval_set")
            elif (isinstance(eval_group, dict)
                  and any(i not in eval_group or eval_group[i] is None for i in range(len(eval_group)))
                  or isinstance(eval_group, list)
                  and any(group is None for group in eval_group)):
                raise ValueError("Should set group for all eval datasets for ranking task; "
                                 "if you use dict, the index should start from 0")

        self._eval_at = eval_at
        super().fit(
            X,
            y,
            sample_weight=sample_weight,
            init_score=init_score,
            group=group,
            eval_set=eval_set,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_init_score=eval_init_score,
            eval_group=eval_group,
            eval_metric=eval_metric,
            feature_name=feature_name,
            categorical_feature=categorical_feature,
            callbacks=callbacks,
            init_model=init_model
        )
        return self

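    # Drop the classification-only ``eval_class_weight`` section from the inherited
    # docstring, then splice an ``eval_at`` entry in front of ``feature_name``.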
    _base_doc = LGBMModel.fit.__doc__.replace("self : LGBMModel", "self : LGBMRanker")  # type: ignore
    fit.__doc__ = (_base_doc[:_base_doc.find('eval_class_weight :')]  # type: ignore
                   + _base_doc[_base_doc.find('eval_init_score :'):])  # type: ignore
    _base_doc = fit.__doc__
    _before_feature_name, _feature_name, _after_feature_name = _base_doc.partition('feature_name :')
    fit.__doc__ = f"""{_before_feature_name}eval_at : list or tuple of int, optional (default=(1, 2, 3, 4, 5))
        The evaluation positions of the specified metric.
    {_feature_name}{_after_feature_name}"""
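

# A minimal usage sketch of LGBMRanker (hypothetical helper and synthetic data,
# illustrative only). ``group`` lists the number of consecutive rows per query:
# group=[10, 10] means rows 0-9 form the first query and rows 10-19 the second.
def _demo_lgbm_ranker_usage() -> None:
    rng = np.random.default_rng(0)
    X = rng.normal(size=(20, 4))
    y = rng.integers(0, 4, size=20)  # graded relevance labels within each query
    ranker = LGBMRanker(n_estimators=5, min_child_samples=2)
    ranker.fit(X, y, group=[10, 10], eval_set=[(X, y)], eval_group=[[10, 10]], eval_at=[1, 3])
    print(ranker.predict(X))  # higher score means ranked earlier within its query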