"vscode:/vscode.git/clone" did not exist on "9d433033f21019321fc542201e95bbd767838c69"
sklearn.py 59 KB
Newer Older
wxchan's avatar
wxchan committed
1
# coding: utf-8
2
"""Scikit-learn wrapper interface for LightGBM."""
3
import copy
4
from inspect import signature
5
from pathlib import Path
6
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
7

wxchan's avatar
wxchan committed
8
import numpy as np
9
import scipy.sparse
10

from .basic import (Booster, Dataset, LightGBMError, _choose_param_value, _ConfigAliases, _LGBM_BoosterBestScoreType,
                    _LGBM_CategoricalFeatureConfiguration, _LGBM_EvalFunctionResultType, _LGBM_FeatureNameConfiguration,
                    _LGBM_GroupType, _LGBM_LabelType, _log_warning)
from .callback import _EvalResultDict, record_evaluation
from .compat import (SKLEARN_INSTALLED, LGBMNotFittedError, _LGBMAssertAllFinite, _LGBMCheckArray,
                     _LGBMCheckClassificationTargets, _LGBMCheckSampleWeight, _LGBMCheckXY, _LGBMClassifierBase,
                     _LGBMComputeSampleWeight, _LGBMCpuCount, _LGBMLabelEncoder, _LGBMModelBase, _LGBMRegressorBase,
                     dt_DataTable, pd_DataFrame)
from .engine import train

__all__ = [
    'LGBMClassifier',
    'LGBMModel',
    'LGBMRanker',
    'LGBMRegressor',
]

_LGBM_ScikitMatrixLike = Union[
    dt_DataTable,
    List[Union[List[float], List[int]]],
    np.ndarray,
    pd_DataFrame,
    scipy.sparse.spmatrix
]
_LGBM_ScikitCustomObjectiveFunction = Union[
    # f(labels, preds)
    Callable[
        [Optional[np.ndarray], np.ndarray],
        Tuple[np.ndarray, np.ndarray]
    ],
    # f(labels, preds, weights)
    Callable[
        [Optional[np.ndarray], np.ndarray, Optional[np.ndarray]],
        Tuple[np.ndarray, np.ndarray]
    ],
    # f(labels, preds, weights, group)
    Callable[
        [Optional[np.ndarray], np.ndarray, Optional[np.ndarray], Optional[np.ndarray]],
        Tuple[np.ndarray, np.ndarray]
    ],
]
_LGBM_ScikitCustomEvalFunction = Union[
    # f(labels, preds)
    Callable[
        [Optional[np.ndarray], np.ndarray],
        _LGBM_EvalFunctionResultType
    ],
    Callable[
        [Optional[np.ndarray], np.ndarray],
        List[_LGBM_EvalFunctionResultType]
    ],
    # f(labels, preds, weights)
    Callable[
        [Optional[np.ndarray], np.ndarray, Optional[np.ndarray]],
        _LGBM_EvalFunctionResultType
    ],
    Callable[
        [Optional[np.ndarray], np.ndarray, Optional[np.ndarray]],
        List[_LGBM_EvalFunctionResultType]
    ],
    # f(labels, preds, weights, group)
    Callable[
        [Optional[np.ndarray], np.ndarray, Optional[np.ndarray], Optional[np.ndarray]],
        _LGBM_EvalFunctionResultType
    ],
    Callable[
        [Optional[np.ndarray], np.ndarray, Optional[np.ndarray], Optional[np.ndarray]],
        List[_LGBM_EvalFunctionResultType]
    ]
]
_LGBM_ScikitEvalMetricType = Union[
    str,
    _LGBM_ScikitCustomEvalFunction,
    List[Union[str, _LGBM_ScikitCustomEvalFunction]]
]
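
# Illustrative note (not part of the library): ``eval_metric`` arguments of this
# type may be a built-in metric name, a custom callable, or a mixed list of
# both. ``custom_rmsle`` below is a placeholder for any callable matching
# _LGBM_ScikitCustomEvalFunction:
#
#     eval_metric='l1'
#     eval_metric=custom_rmsle
#     eval_metric=['l1', 'l2', custom_rmsle]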


class _ObjectiveFunctionWrapper:
    """Proxy class for objective function."""

    def __init__(self, func: _LGBM_ScikitCustomObjectiveFunction):
        """Construct a proxy class.

        This class transforms an objective function to match the ``new_func(preds, dataset)``
        signature expected by ``lightgbm.engine.train``.

        Parameters
        ----------
        func : callable
            Expects a callable with following signatures:
            ``func(y_true, y_pred)``,
            ``func(y_true, y_pred, weight)``
            or ``func(y_true, y_pred, weight, group)``
            and returns (grad, hess):

                y_true : numpy 1-D array of shape = [n_samples]
                    The target values.
                y_pred : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                    The predicted values.
                    Predicted values are returned before any transformation,
                    e.g. they are raw margin instead of probability of positive class for binary task.
                weight : numpy 1-D array of shape = [n_samples]
                    The weight of samples. Weights should be non-negative.
                group : numpy 1-D array
                    Group/query data.
                    Only used in the learning-to-rank task.
                    sum(group) = n_samples.
                    For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
                    where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
                grad : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                    The value of the first order derivative (gradient) of the loss
                    with respect to the elements of y_pred for each sample point.
                hess : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                    The value of the second order derivative (Hessian) of the loss
                    with respect to the elements of y_pred for each sample point.

        .. note::

            For multi-class task, y_pred is a numpy 2-D array of shape = [n_samples, n_classes],
            and grad and hess should be returned in the same format.
        """
        self.func = func

    def __call__(self, preds: np.ndarray, dataset: Dataset) -> Tuple[np.ndarray, np.ndarray]:
        """Call passed function with appropriate arguments.

        Parameters
        ----------
        preds : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The predicted values.
        dataset : Dataset
            The training dataset.

        Returns
        -------
        grad : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The value of the first order derivative (gradient) of the loss
            with respect to the elements of preds for each sample point.
        hess : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The value of the second order derivative (Hessian) of the loss
            with respect to the elements of preds for each sample point.
        """
        labels = dataset.get_label()
        argc = len(signature(self.func).parameters)
        if argc == 2:
            grad, hess = self.func(labels, preds)  # type: ignore[call-arg]
        elif argc == 3:
            grad, hess = self.func(labels, preds, dataset.get_weight())  # type: ignore[call-arg]
        elif argc == 4:
            grad, hess = self.func(labels, preds, dataset.get_weight(), dataset.get_group())  # type: ignore[call-arg]
        else:
            raise TypeError(f"Self-defined objective function should have 2, 3 or 4 arguments, got {argc}")
        return grad, hess
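
# Illustrative sketch (not part of the library) of a custom objective in the
# format _ObjectiveFunctionWrapper expects: it receives (y_true, y_pred) and
# returns (grad, hess). This example assumes a least-squares loss, for which
# grad = y_pred - y_true and hess = 1 for every sample:
#
#     def l2_objective(y_true, y_pred):
#         grad = y_pred - y_true
#         hess = np.ones_like(y_pred)
#         return grad, hess
#
#     # e.g. LGBMRegressor(objective=l2_objective)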


class _EvalFunctionWrapper:
    """Proxy class for evaluation function."""

    def __init__(self, func: _LGBM_ScikitCustomEvalFunction):
        """Construct a proxy class.

        This class transforms an evaluation function to match the ``new_func(preds, dataset)``
        signature expected by ``lightgbm.engine.train``.

        Parameters
        ----------
        func : callable
            Expects a callable with following signatures:
            ``func(y_true, y_pred)``,
            ``func(y_true, y_pred, weight)``
            or ``func(y_true, y_pred, weight, group)``
            and returns (eval_name, eval_result, is_higher_better) or
            list of (eval_name, eval_result, is_higher_better):

                y_true : numpy 1-D array of shape = [n_samples]
                    The target values.
                y_pred : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                    The predicted values.
                    In case of custom ``objective``, predicted values are returned before any transformation,
                    e.g. they are raw margin instead of probability of positive class for binary task in this case.
                weight : numpy 1-D array of shape = [n_samples]
                    The weight of samples. Weights should be non-negative.
                group : numpy 1-D array
                    Group/query data.
                    Only used in the learning-to-rank task.
                    sum(group) = n_samples.
                    For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
                    where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
                eval_name : str
                    The name of evaluation function (without whitespace).
                eval_result : float
                    The eval result.
                is_higher_better : bool
                    Is eval result higher better, e.g. AUC is ``is_higher_better``.
        """
        self.func = func

    def __call__(
        self,
        preds: np.ndarray,
        dataset: Dataset
    ) -> Union[_LGBM_EvalFunctionResultType, List[_LGBM_EvalFunctionResultType]]:
        """Call passed function with appropriate arguments.

        Parameters
        ----------
        preds : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The predicted values.
        dataset : Dataset
            The training dataset.

        Returns
        -------
        eval_name : str
            The name of evaluation function (without whitespace).
        eval_result : float
            The eval result.
        is_higher_better : bool
            Is eval result higher better, e.g. AUC is ``is_higher_better``.
        """
        labels = dataset.get_label()
        argc = len(signature(self.func).parameters)
        if argc == 2:
            return self.func(labels, preds)  # type: ignore[call-arg]
        elif argc == 3:
            return self.func(labels, preds, dataset.get_weight())  # type: ignore[call-arg]
        elif argc == 4:
            return self.func(labels, preds, dataset.get_weight(), dataset.get_group())  # type: ignore[call-arg]
        else:
            raise TypeError(f"Self-defined eval function should have 2, 3 or 4 arguments, got {argc}")
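
# Illustrative sketch (not part of the library) of a custom eval function in
# the format _EvalFunctionWrapper expects: it returns the tuple
# (eval_name, eval_result, is_higher_better). 'rmse_example' is a placeholder name:
#
#     def rmse_example(y_true, y_pred):
#         rmse = float(np.sqrt(np.mean((y_pred - y_true) ** 2)))
#         return 'rmse_example', rmse, False  # lower is better
#
#     # e.g. model.fit(X, y, eval_set=[(X_val, y_val)], eval_metric=rmse_example)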


# documentation templates for LGBMModel methods are shared between the classes in
# this module and those in the ``dask`` module

_lgbmmodel_doc_fit = (
    """
    Build a gradient boosting model from the training set (X, y).

    Parameters
    ----------
    X : {X_shape}
        Input feature matrix.
    y : {y_shape}
        The target values (class labels in classification, real numbers in regression).
    sample_weight : {sample_weight_shape}
        Weights of training data. Weights should be non-negative.
    init_score : {init_score_shape}
        Init score of training data.
    group : {group_shape}
        Group/query data.
        Only used in the learning-to-rank task.
        sum(group) = n_samples.
        For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
        where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
    eval_set : list or None, optional (default=None)
        A list of (X, y) tuple pairs to use as validation sets.
    eval_names : list of str, or None, optional (default=None)
        Names of eval_set.
    eval_sample_weight : {eval_sample_weight_shape}
        Weights of eval data. Weights should be non-negative.
    eval_class_weight : list or None, optional (default=None)
        Class weights of eval data.
    eval_init_score : {eval_init_score_shape}
        Init score of eval data.
    eval_group : {eval_group_shape}
        Group data of eval data.
    eval_metric : str, callable, list or None, optional (default=None)
        If str, it should be a built-in evaluation metric to use.
        If callable, it should be a custom evaluation metric, see note below for more details.
        If list, it can be a list of built-in metrics, a list of custom evaluation metrics, or a mix of both.
        In either case, the ``metric`` from the model parameters will be evaluated and used as well.
        Default: 'l2' for LGBMRegressor, 'logloss' for LGBMClassifier, 'ndcg' for LGBMRanker.
    feature_name : list of str, or 'auto', optional (default='auto')
        Feature names.
        If 'auto' and data is pandas DataFrame, data column names are used.
    categorical_feature : list of str or int, or 'auto', optional (default='auto')
        Categorical features.
        If list of int, interpreted as indices.
        If list of str, interpreted as feature names (need to specify ``feature_name`` as well).
        If 'auto' and data is pandas DataFrame, pandas unordered categorical columns are used.
        All values in categorical features will be cast to int32 and thus should be less than int32 max value (2147483647).
        Large values could be memory consuming. Consider using consecutive integers starting from zero.
        All negative values in categorical features will be treated as missing values.
        The output cannot be monotonically constrained with respect to a categorical feature.
        Floating point numbers in categorical features will be rounded towards 0.
    callbacks : list of callable, or None, optional (default=None)
        List of callback functions that are applied at each iteration.
        See Callbacks in Python API for more information.
    init_model : str, pathlib.Path, Booster, LGBMModel or None, optional (default=None)
        Filename of LightGBM model, Booster instance or LGBMModel instance used to continue training.

    Returns
    -------
    self : LGBMModel
        Returns self.
    """
)

_lgbmmodel_doc_custom_eval_note = """
    Note
    ----
    Custom eval function expects a callable with following signatures:
    ``func(y_true, y_pred)``, ``func(y_true, y_pred, weight)`` or
    ``func(y_true, y_pred, weight, group)``
    and returns (eval_name, eval_result, is_higher_better) or
    list of (eval_name, eval_result, is_higher_better):

        y_true : numpy 1-D array of shape = [n_samples]
            The target values.
        y_pred : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The predicted values.
            In case of custom ``objective``, predicted values are returned before any transformation,
            e.g. they are raw margin instead of probability of positive class for binary task in this case.
        weight : numpy 1-D array of shape = [n_samples]
            The weight of samples. Weights should be non-negative.
        group : numpy 1-D array
            Group/query data.
            Only used in the learning-to-rank task.
            sum(group) = n_samples.
            For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
            where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
        eval_name : str
            The name of evaluation function (without whitespace).
        eval_result : float
            The eval result.
        is_higher_better : bool
            Is eval result higher better, e.g. AUC is ``is_higher_better``.
"""

_lgbmmodel_doc_predict = (
    """
    {description}

    Parameters
    ----------
    X : {X_shape}
        Input features matrix.
    raw_score : bool, optional (default=False)
        Whether to predict raw scores.
    start_iteration : int, optional (default=0)
        Start index of the iteration to predict.
        If <= 0, starts from the first iteration.
    num_iteration : int or None, optional (default=None)
        Total number of iterations used in the prediction.
        If None, if the best iteration exists and start_iteration <= 0, the best iteration is used;
        otherwise, all iterations from ``start_iteration`` are used (no limits).
        If <= 0, all iterations from ``start_iteration`` are used (no limits).
    pred_leaf : bool, optional (default=False)
        Whether to predict leaf index.
    pred_contrib : bool, optional (default=False)
        Whether to predict feature contributions.

        .. note::

            If you want to get more explanations for your model's predictions using SHAP values,
            like SHAP interaction values,
            you can install the shap package (https://github.com/slundberg/shap).
            Note that unlike the shap package, with ``pred_contrib`` we return a matrix with an extra
            column, where the last column is the expected value.

    validate_features : bool, optional (default=False)
        If True, ensure that the features used to predict match the ones used to train.
        Used only if data is pandas DataFrame.
    **kwargs
        Other parameters for the prediction.

    Returns
    -------
    {output_name} : {predicted_result_shape}
        The predicted values.
    X_leaves : {X_leaves_shape}
        If ``pred_leaf=True``, the predicted leaf of every tree for each sample.
    X_SHAP_values : {X_SHAP_values_shape}
        If ``pred_contrib=True``, the feature contributions for each sample.
    """
)


class LGBMModel(_LGBMModelBase):
    """Implementation of the scikit-learn API for LightGBM."""

    def __init__(
        self,
        boosting_type: str = 'gbdt',
        num_leaves: int = 31,
        max_depth: int = -1,
        learning_rate: float = 0.1,
        n_estimators: int = 100,
        subsample_for_bin: int = 200000,
        objective: Optional[Union[str, _LGBM_ScikitCustomObjectiveFunction]] = None,
        class_weight: Optional[Union[Dict, str]] = None,
        min_split_gain: float = 0.,
        min_child_weight: float = 1e-3,
        min_child_samples: int = 20,
        subsample: float = 1.,
        subsample_freq: int = 0,
        colsample_bytree: float = 1.,
        reg_alpha: float = 0.,
        reg_lambda: float = 0.,
        random_state: Optional[Union[int, np.random.RandomState]] = None,
        n_jobs: Optional[int] = None,
        importance_type: str = 'split',
        **kwargs
    ):
        r"""Construct a gradient boosting model.

        Parameters
        ----------
        boosting_type : str, optional (default='gbdt')
            'gbdt', traditional Gradient Boosting Decision Tree.
            'dart', Dropouts meet Multiple Additive Regression Trees.
            'rf', Random Forest.
        num_leaves : int, optional (default=31)
            Maximum tree leaves for base learners.
        max_depth : int, optional (default=-1)
            Maximum tree depth for base learners, <=0 means no limit.
        learning_rate : float, optional (default=0.1)
            Boosting learning rate.
            You can use ``callbacks`` parameter of ``fit`` method to shrink/adapt learning rate
            in training using ``reset_parameter`` callback.
            Note, that this will ignore the ``learning_rate`` argument in training.
        n_estimators : int, optional (default=100)
            Number of boosted trees to fit.
        subsample_for_bin : int, optional (default=200000)
            Number of samples for constructing bins.
        objective : str, callable or None, optional (default=None)
            Specify the learning task and the corresponding learning objective or
            a custom objective function to be used (see note below).
            Default: 'regression' for LGBMRegressor, 'binary' or 'multiclass' for LGBMClassifier, 'lambdarank' for LGBMRanker.
        class_weight : dict, 'balanced' or None, optional (default=None)
            Weights associated with classes in the form ``{class_label: weight}``.
            Use this parameter only for multi-class classification task;
            for binary classification task you may use ``is_unbalance`` or ``scale_pos_weight`` parameters.
            Note, that the usage of all these parameters will result in poor estimates of the individual class probabilities.
            You may want to consider performing probability calibration
            (https://scikit-learn.org/stable/modules/calibration.html) of your model.
            The 'balanced' mode uses the values of y to automatically adjust weights
            inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))``.
            If None, all classes are supposed to have weight one.
            Note, that these weights will be multiplied with ``sample_weight`` (passed through the ``fit`` method)
            if ``sample_weight`` is specified.
        min_split_gain : float, optional (default=0.)
            Minimum loss reduction required to make a further partition on a leaf node of the tree.
        min_child_weight : float, optional (default=1e-3)
            Minimum sum of instance weight (Hessian) needed in a child (leaf).
        min_child_samples : int, optional (default=20)
            Minimum number of data needed in a child (leaf).
        subsample : float, optional (default=1.)
            Subsample ratio of the training instance.
        subsample_freq : int, optional (default=0)
            Frequency of subsample, <=0 means disabled.
        colsample_bytree : float, optional (default=1.)
            Subsample ratio of columns when constructing each tree.
        reg_alpha : float, optional (default=0.)
            L1 regularization term on weights.
        reg_lambda : float, optional (default=0.)
            L2 regularization term on weights.
        random_state : int, RandomState object or None, optional (default=None)
            Random number seed.
            If int, this number is used to seed the C++ code.
            If RandomState object (numpy), a random integer is picked based on its state to seed the C++ code.
            If None, default seeds in C++ code are used.
        n_jobs : int or None, optional (default=None)
            Number of parallel threads to use for training (can be changed at prediction time by
            passing it as an extra keyword argument).

            For better performance, it is recommended to set this to the number of physical cores
            in the CPU.

            Negative integers are interpreted as following joblib's formula (n_cpus + 1 + n_jobs), just like
            scikit-learn (so e.g. -1 means using all threads). A value of zero corresponds to the default number of
            threads configured for OpenMP in the system. A value of ``None`` (the default) corresponds
            to using the number of physical cores in the system (its correct detection requires
            either the ``joblib`` or the ``psutil`` util libraries to be installed).
        importance_type : str, optional (default='split')
            The type of feature importance to be filled into ``feature_importances_``.
            If 'split', result contains numbers of times the feature is used in a model.
            If 'gain', result contains total gains of splits which use the feature.
        **kwargs
            Other parameters for the model.
            Check http://lightgbm.readthedocs.io/en/latest/Parameters.html for more parameters.

            .. warning::

                \*\*kwargs is not supported in sklearn, so it may cause unexpected issues.

        Note
        ----
        A custom objective function can be provided for the ``objective`` parameter.
        In this case, it should have the signature
        ``objective(y_true, y_pred) -> grad, hess``,
        ``objective(y_true, y_pred, weight) -> grad, hess``
        or ``objective(y_true, y_pred, weight, group) -> grad, hess``:

            y_true : numpy 1-D array of shape = [n_samples]
                The target values.
            y_pred : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                The predicted values.
                Predicted values are returned before any transformation,
                e.g. they are raw margin instead of probability of positive class for binary task.
            weight : numpy 1-D array of shape = [n_samples]
                The weight of samples. Weights should be non-negative.
            group : numpy 1-D array
                Group/query data.
                Only used in the learning-to-rank task.
                sum(group) = n_samples.
                For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
                where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
            grad : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                The value of the first order derivative (gradient) of the loss
                with respect to the elements of y_pred for each sample point.
            hess : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                The value of the second order derivative (Hessian) of the loss
                with respect to the elements of y_pred for each sample point.

        For multi-class task, y_pred is a numpy 2-D array of shape = [n_samples, n_classes],
        and grad and hess should be returned in the same format.
        """
        if not SKLEARN_INSTALLED:
            raise LightGBMError('scikit-learn is required for lightgbm.sklearn. '
                                'You must install scikit-learn and restart your session to use this module.')

        self.boosting_type = boosting_type
        self.objective = objective
        self.num_leaves = num_leaves
        self.max_depth = max_depth
        self.learning_rate = learning_rate
        self.n_estimators = n_estimators
        self.subsample_for_bin = subsample_for_bin
        self.min_split_gain = min_split_gain
        self.min_child_weight = min_child_weight
        self.min_child_samples = min_child_samples
        self.subsample = subsample
        self.subsample_freq = subsample_freq
        self.colsample_bytree = colsample_bytree
        self.reg_alpha = reg_alpha
        self.reg_lambda = reg_lambda
        self.random_state = random_state
        self.n_jobs = n_jobs
        self.importance_type = importance_type
        self._Booster: Optional[Booster] = None
        self._evals_result: _EvalResultDict = {}
        self._best_score: _LGBM_BoosterBestScoreType = {}
        self._best_iteration: int = -1
        self._other_params: Dict[str, Any] = {}
        self._objective = objective
        self.class_weight = class_weight
        self._class_weight: Optional[Union[Dict, str]] = None
        self._class_map: Optional[Dict[int, int]] = None
        self._n_features: int = -1
        self._n_features_in: int = -1
        self._classes: Optional[np.ndarray] = None
        self._n_classes: int = -1
        self.set_params(**kwargs)

    def _more_tags(self) -> Dict[str, Any]:
        return {
            'allow_nan': True,
            'X_types': ['2darray', 'sparse', '1dlabels'],
            '_xfail_checks': {
                'check_no_attributes_set_in_init':
                'scikit-learn incorrectly asserts that private attributes '
                'cannot be set in __init__: '
                '(see https://github.com/microsoft/LightGBM/issues/2628)'
            }
        }

    def __sklearn_is_fitted__(self) -> bool:
        return getattr(self, "fitted_", False)

    def get_params(self, deep: bool = True) -> Dict[str, Any]:
        """Get parameters for this estimator.

        Parameters
        ----------
        deep : bool, optional (default=True)
            If True, will return the parameters for this estimator and
            contained subobjects that are estimators.

        Returns
        -------
        params : dict
            Parameter names mapped to their values.
        """
        params = super().get_params(deep=deep)
        params.update(self._other_params)
        return params

    def set_params(self, **params: Any) -> "LGBMModel":
        """Set the parameters of this estimator.

        Parameters
        ----------
        **params
            Parameter names with their new values.

        Returns
        -------
        self : object
            Returns self.
        """
        for key, value in params.items():
            setattr(self, key, value)
            if hasattr(self, f"_{key}"):
                setattr(self, f"_{key}", value)
            self._other_params[key] = value
        return self

    def _process_params(self, stage: str) -> Dict[str, Any]:
        """Process the parameters of this estimator based on its type, parameter aliases, etc.

        Parameters
        ----------
        stage : str
            Name of the stage (can be ``fit`` or ``predict``) this method is called from.

        Returns
        -------
        processed_params : dict
            Processed parameter names mapped to their values.
        """
        assert stage in {"fit", "predict"}
        params = self.get_params()

        params.pop('objective', None)
        for alias in _ConfigAliases.get('objective'):
            if alias in params:
                obj = params.pop(alias)
                _log_warning(f"Found '{alias}' in params. Will use it instead of 'objective' argument")
                if stage == "fit":
                    self._objective = obj
        if stage == "fit":
            if self._objective is None:
                if isinstance(self, LGBMRegressor):
                    self._objective = "regression"
                elif isinstance(self, LGBMClassifier):
                    if self._n_classes > 2:
                        self._objective = "multiclass"
                    else:
                        self._objective = "binary"
                elif isinstance(self, LGBMRanker):
                    self._objective = "lambdarank"
                else:
                    raise ValueError("Unknown LGBMModel type.")
        if callable(self._objective):
            if stage == "fit":
                params['objective'] = _ObjectiveFunctionWrapper(self._objective)
            else:
                params['objective'] = 'None'
        else:
            params['objective'] = self._objective

        params.pop('importance_type', None)
        params.pop('n_estimators', None)
        params.pop('class_weight', None)

        if isinstance(params['random_state'], np.random.RandomState):
            params['random_state'] = params['random_state'].randint(np.iinfo(np.int32).max)
        if self._n_classes > 2:
            for alias in _ConfigAliases.get('num_class'):
                params.pop(alias, None)
            params['num_class'] = self._n_classes
        if hasattr(self, '_eval_at'):
            eval_at = self._eval_at
            for alias in _ConfigAliases.get('eval_at'):
                if alias in params:
                    _log_warning(f"Found '{alias}' in params. Will use it instead of 'eval_at' argument")
                    eval_at = params.pop(alias)
            params['eval_at'] = eval_at

        # register default metric for consistency with callable eval_metric case
        original_metric = self._objective if isinstance(self._objective, str) else None
        if original_metric is None:
            # try to deduce from class instance
            if isinstance(self, LGBMRegressor):
                original_metric = "l2"
            elif isinstance(self, LGBMClassifier):
                original_metric = "multi_logloss" if self._n_classes > 2 else "binary_logloss"
            elif isinstance(self, LGBMRanker):
                original_metric = "ndcg"

        # overwrite default metric by explicitly set metric
        params = _choose_param_value("metric", params, original_metric)

        # use joblib conventions for negative n_jobs, just like scikit-learn
        # at predict time, this is handled later due to the order of parameter updates
        if stage == "fit":
            params = _choose_param_value("num_threads", params, self.n_jobs)
            params["num_threads"] = self._process_n_jobs(params["num_threads"])

        return params

    def _process_n_jobs(self, n_jobs: Optional[int]) -> int:
        """Convert special values of n_jobs to their actual values according to the formulas that apply.

        Parameters
        ----------
        n_jobs : int or None
            The original value of n_jobs, potentially having special values such as 'None' or
            negative integers.

        Returns
        -------
        n_jobs : int
            The value of n_jobs with special values converted to actual number of threads.
        """
        if n_jobs is None:
            n_jobs = _LGBMCpuCount(only_physical_cores=True)
        elif n_jobs < 0:
            n_jobs = max(_LGBMCpuCount(only_physical_cores=False) + 1 + n_jobs, 1)
        return n_jobs
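
    # Illustrative sketch (not part of the library) of how _process_n_jobs
    # resolves special values, assuming a machine with 8 physical cores and
    # 16 hardware threads:
    #
    #     model._process_n_jobs(None)  # -> 8   (physical cores)
    #     model._process_n_jobs(-1)    # -> 16  (16 + 1 - 1, all threads)
    #     model._process_n_jobs(-17)   # -> 1   (clamped to at least one thread)
    #     model._process_n_jobs(4)     # -> 4   (positive values pass through)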

    def fit(
        self,
        X: _LGBM_ScikitMatrixLike,
        y: _LGBM_LabelType,
        sample_weight=None,
        init_score=None,
        group: Optional[_LGBM_GroupType] = None,
        eval_set=None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight=None,
        eval_class_weight=None,
        eval_init_score=None,
        eval_group=None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        feature_name: _LGBM_FeatureNameConfiguration = 'auto',
        categorical_feature: _LGBM_CategoricalFeatureConfiguration = 'auto',
        callbacks: Optional[List[Callable]] = None,
        init_model: Optional[Union[str, Path, Booster, "LGBMModel"]] = None
    ) -> "LGBMModel":
        """Docstring is set after definition, using a template."""
        params = self._process_params(stage="fit")

        # Do not modify original args in fit function
        # Refer to https://github.com/microsoft/LightGBM/pull/2619
        eval_metric_list = copy.deepcopy(eval_metric)
        if not isinstance(eval_metric_list, list):
            eval_metric_list = [eval_metric_list]

        # Separate built-in from callable evaluation metrics
        eval_metrics_callable = [_EvalFunctionWrapper(f) for f in eval_metric_list if callable(f)]
        eval_metrics_builtin = [m for m in eval_metric_list if isinstance(m, str)]

        # concatenate metric from params (or default if not provided in params) and eval_metric
        params['metric'] = [params['metric']] if isinstance(params['metric'], (str, type(None))) else params['metric']
        params['metric'] = [e for e in eval_metrics_builtin if e not in params['metric']] + params['metric']
        params['metric'] = [metric for metric in params['metric'] if metric is not None]

        if not isinstance(X, (pd_DataFrame, dt_DataTable)):
            _X, _y = _LGBMCheckXY(X, y, accept_sparse=True, force_all_finite=False, ensure_min_samples=2)
            if sample_weight is not None:
                sample_weight = _LGBMCheckSampleWeight(sample_weight, _X)
        else:
            _X, _y = X, y

        if self._class_weight is None:
            self._class_weight = self.class_weight
        if self._class_weight is not None:
            class_sample_weight = _LGBMComputeSampleWeight(self._class_weight, y)
            if sample_weight is None or len(sample_weight) == 0:
                sample_weight = class_sample_weight
            else:
                sample_weight = np.multiply(sample_weight, class_sample_weight)

        self._n_features = _X.shape[1]
        # copy for consistency
        self._n_features_in = self._n_features

        train_set = Dataset(data=_X, label=_y, weight=sample_weight, group=group,
                            init_score=init_score, categorical_feature=categorical_feature,
                            params=params)

        valid_sets: List[Dataset] = []
        if eval_set is not None:

            def _get_meta_data(collection, name, i):
                if collection is None:
                    return None
                elif isinstance(collection, list):
                    return collection[i] if len(collection) > i else None
                elif isinstance(collection, dict):
                    return collection.get(i, None)
                else:
                    raise TypeError(f"{name} should be dict or list")

            if isinstance(eval_set, tuple):
                eval_set = [eval_set]
            for i, valid_data in enumerate(eval_set):
                # reduce cost for prediction training data
                if valid_data[0] is X and valid_data[1] is y:
                    valid_set = train_set
                else:
                    valid_weight = _get_meta_data(eval_sample_weight, 'eval_sample_weight', i)
                    valid_class_weight = _get_meta_data(eval_class_weight, 'eval_class_weight', i)
                    if valid_class_weight is not None:
                        if isinstance(valid_class_weight, dict) and self._class_map is not None:
                            valid_class_weight = {self._class_map[k]: v for k, v in valid_class_weight.items()}
                        valid_class_sample_weight = _LGBMComputeSampleWeight(valid_class_weight, valid_data[1])
                        if valid_weight is None or len(valid_weight) == 0:
                            valid_weight = valid_class_sample_weight
                        else:
                            valid_weight = np.multiply(valid_weight, valid_class_sample_weight)
                    valid_init_score = _get_meta_data(eval_init_score, 'eval_init_score', i)
                    valid_group = _get_meta_data(eval_group, 'eval_group', i)
                    valid_set = Dataset(data=valid_data[0], label=valid_data[1], weight=valid_weight,
                                        group=valid_group, init_score=valid_init_score,
                                        categorical_feature='auto', params=params)

                valid_sets.append(valid_set)

        if isinstance(init_model, LGBMModel):
            init_model = init_model.booster_

        if callbacks is None:
            callbacks = []
        else:
            callbacks = copy.copy(callbacks)  # don't use deepcopy here to allow non-serializable objects

        evals_result: _EvalResultDict = {}
        callbacks.append(record_evaluation(evals_result))

        self._Booster = train(
            params=params,
            train_set=train_set,
            num_boost_round=self.n_estimators,
            valid_sets=valid_sets,
            valid_names=eval_names,
            feval=eval_metrics_callable,  # type: ignore[arg-type]
            init_model=init_model,
            feature_name=feature_name,
            callbacks=callbacks
        )

        self._evals_result = evals_result
        self._best_iteration = self._Booster.best_iteration
        self._best_score = self._Booster.best_score

        self.fitted_ = True

        # free dataset
        self._Booster.free_dataset()
        del train_set, valid_sets
        return self

    fit.__doc__ = _lgbmmodel_doc_fit.format(
        X_shape="numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse, list of lists of int or float of shape = [n_samples, n_features]",
        y_shape="numpy array, pandas DataFrame, pandas Series, list of int or float of shape = [n_samples]",
        sample_weight_shape="array-like of shape = [n_samples] or None, optional (default=None)",
        init_score_shape="array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task) or shape = [n_samples, n_classes] (for multi-class task) or None, optional (default=None)",
        group_shape="numpy array, pandas Series, list of int or float, or None, optional (default=None)",
        eval_sample_weight_shape="list of array, or None, optional (default=None)",
        eval_init_score_shape="list of array, or None, optional (default=None)",
        eval_group_shape="list of array, or None, optional (default=None)"
    ) + "\n\n" + _lgbmmodel_doc_custom_eval_note

    def predict(
        self,
        X: _LGBM_ScikitMatrixLike,
        raw_score: bool = False,
        start_iteration: int = 0,
        num_iteration: Optional[int] = None,
        pred_leaf: bool = False,
        pred_contrib: bool = False,
        validate_features: bool = False,
        **kwargs: Any
    ):
        """Docstring is set after definition, using a template."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("Estimator not fitted, call fit before exploiting the model.")
        if not isinstance(X, (pd_DataFrame, dt_DataTable)):
            X = _LGBMCheckArray(X, accept_sparse=True, force_all_finite=False)
        n_features = X.shape[1]
        if self._n_features != n_features:
            raise ValueError("Number of features of the model must "
                             f"match the input. Model n_features_ is {self._n_features} and "
                             f"input n_features is {n_features}")
        # retrieve original params that possibly can be used in both training and prediction
        # and then overwrite them (considering aliases) with params that were passed directly in prediction
        predict_params = self._process_params(stage="predict")
        for alias in _ConfigAliases.get_by_alias(
            "data",
            "X",
            "raw_score",
            "start_iteration",
            "num_iteration",
            "pred_leaf",
            "pred_contrib",
            *kwargs.keys()
        ):
            predict_params.pop(alias, None)
        predict_params.update(kwargs)

        # number of threads can have values with special meaning which is only applied
        # in the scikit-learn interface, these should not reach the c++ side as-is
        predict_params = _choose_param_value("num_threads", predict_params, self.n_jobs)
        predict_params["num_threads"] = self._process_n_jobs(predict_params["num_threads"])

        return self._Booster.predict(  # type: ignore[union-attr]
            X, raw_score=raw_score, start_iteration=start_iteration, num_iteration=num_iteration,
            pred_leaf=pred_leaf, pred_contrib=pred_contrib, validate_features=validate_features,
            **predict_params
        )

    predict.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted value for each sample.",
        X_shape="numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse, list of lists of int or float of shape = [n_samples, n_features]",
        output_name="predicted_result",
        predicted_result_shape="array-like of shape = [n_samples] or shape = [n_samples, n_classes]",
        X_leaves_shape="array-like of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]",
        X_SHAP_values_shape="array-like of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes] or list with n_classes length of such objects"
    )
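
    # Illustrative usage sketch (not part of the library); X_test is a placeholder:
    #
    #     y_pred = model.predict(X_test)                      # predicted values
    #     leaves = model.predict(X_test, pred_leaf=True)      # leaf index per tree
    #     contrib = model.predict(X_test, pred_contrib=True)  # per-feature contributions,
    #                                                         # plus a final expected-value column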

    @property
    def n_features_(self) -> int:
        """:obj:`int`: The number of features of fitted model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No n_features found. Need to call fit beforehand.')
        return self._n_features

    @property
    def n_features_in_(self) -> int:
        """:obj:`int`: The number of features of fitted model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No n_features_in found. Need to call fit beforehand.')
        return self._n_features_in

    @property
    def best_score_(self) -> _LGBM_BoosterBestScoreType:
        """:obj:`dict`: The best score of fitted model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No best_score found. Need to call fit beforehand.')
        return self._best_score

    @property
    def best_iteration_(self) -> int:
        """:obj:`int`: The best iteration of fitted model if ``early_stopping()`` callback has been specified."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No best_iteration found. Need to call fit with early_stopping callback beforehand.')
        return self._best_iteration

    @property
    def objective_(self) -> Union[str, _LGBM_ScikitCustomObjectiveFunction]:
        """:obj:`str` or :obj:`callable`: The concrete objective used while fitting this model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No objective found. Need to call fit beforehand.')
        return self._objective

    @property
    def n_estimators_(self) -> int:
        """:obj:`int`: True number of boosting iterations performed.

        This might be less than parameter ``n_estimators`` if early stopping was enabled or
        if boosting stopped early due to limits on complexity like ``min_gain_to_split``.
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No n_estimators found. Need to call fit beforehand.')
        return self._Booster.current_iteration()  # type: ignore

    @property
    def n_iter_(self) -> int:
        """:obj:`int`: True number of boosting iterations performed.

        This might be less than parameter ``n_estimators`` if early stopping was enabled or
        if boosting stopped early due to limits on complexity like ``min_gain_to_split``.
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No n_iter found. Need to call fit beforehand.')
        return self._Booster.current_iteration()  # type: ignore

    @property
    def booster_(self) -> Booster:
        """Booster: The underlying Booster of this model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No booster found. Need to call fit beforehand.')
        return self._Booster  # type: ignore[return-value]

    @property
    def evals_result_(self) -> _EvalResultDict:
        """:obj:`dict`: The evaluation results if validation sets have been specified."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No results found. Need to call fit with eval_set beforehand.')
        return self._evals_result

    @property
    def feature_importances_(self) -> np.ndarray:
        """:obj:`array` of shape = [n_features]: The feature importances (the higher, the more important).

        .. note::

            ``importance_type`` attribute is passed to the function
            to configure the type of importance values to be extracted.
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No feature_importances found. Need to call fit beforehand.')
        return self._Booster.feature_importance(importance_type=self.importance_type)  # type: ignore[union-attr]

    @property
    def feature_name_(self) -> List[str]:
        """:obj:`list` of shape = [n_features]: The names of features."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No feature_name found. Need to call fit beforehand.')
        return self._Booster.feature_name()  # type: ignore[union-attr]


class LGBMRegressor(_LGBMRegressorBase, LGBMModel):
    """LightGBM regressor."""

    def fit(  # type: ignore[override]
        self,
        X: _LGBM_ScikitMatrixLike,
        y: _LGBM_LabelType,
        sample_weight=None,
        init_score=None,
        eval_set=None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight=None,
        eval_init_score=None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        feature_name: _LGBM_FeatureNameConfiguration = 'auto',
        categorical_feature: _LGBM_CategoricalFeatureConfiguration = 'auto',
        callbacks: Optional[List[Callable]] = None,
        init_model: Optional[Union[str, Path, Booster, LGBMModel]] = None
    ) -> "LGBMRegressor":
        """Docstring is inherited from the LGBMModel."""
        super().fit(
            X,
            y,
            sample_weight=sample_weight,
            init_score=init_score,
            eval_set=eval_set,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_init_score=eval_init_score,
            eval_metric=eval_metric,
            feature_name=feature_name,
            categorical_feature=categorical_feature,
            callbacks=callbacks,
            init_model=init_model
        )
        return self

    _base_doc = LGBMModel.fit.__doc__.replace("self : LGBMModel", "self : LGBMRegressor")  # type: ignore
    _base_doc = (_base_doc[:_base_doc.find('group :')]  # type: ignore
                 + _base_doc[_base_doc.find('eval_set :'):])  # type: ignore
    _base_doc = (_base_doc[:_base_doc.find('eval_class_weight :')]
                 + _base_doc[_base_doc.find('eval_init_score :'):])
    fit.__doc__ = (_base_doc[:_base_doc.find('eval_group :')]
                   + _base_doc[_base_doc.find('eval_metric :'):])


class LGBMClassifier(_LGBMClassifierBase, LGBMModel):
    """LightGBM classifier."""

    def fit(  # type: ignore[override]
        self,
        X: _LGBM_ScikitMatrixLike,
        y: _LGBM_LabelType,
        sample_weight=None,
        init_score=None,
        eval_set=None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight=None,
        eval_class_weight=None,
        eval_init_score=None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        feature_name: _LGBM_FeatureNameConfiguration = 'auto',
        categorical_feature: _LGBM_CategoricalFeatureConfiguration = 'auto',
        callbacks: Optional[List[Callable]] = None,
        init_model: Optional[Union[str, Path, Booster, LGBMModel]] = None
    ) -> "LGBMClassifier":
        """Docstring is inherited from the LGBMModel."""
1084
        _LGBMAssertAllFinite(y)
        _LGBMCheckClassificationTargets(y)
        self._le = _LGBMLabelEncoder().fit(y)
        _y = self._le.transform(y)
        self._class_map = dict(zip(self._le.classes_, self._le.transform(self._le.classes_)))
        if isinstance(self.class_weight, dict):
            self._class_weight = {self._class_map[k]: v for k, v in self.class_weight.items()}

        self._classes = self._le.classes_
        self._n_classes = len(self._classes)  # type: ignore[arg-type]

        # adjust eval metrics to match whether binary or multiclass
        # classification is being performed
        if not callable(eval_metric):
            if isinstance(eval_metric, list):
                eval_metric_list = eval_metric
            elif isinstance(eval_metric, str):
                eval_metric_list = [eval_metric]
            else:
                eval_metric_list = []
            if self._n_classes > 2:
                for index, metric in enumerate(eval_metric_list):
                    if metric in {'logloss', 'binary_logloss'}:
                        eval_metric_list[index] = 'multi_logloss'
                    elif metric in {'error', 'binary_error'}:
                        eval_metric_list[index] = 'multi_error'
            else:
                for index, metric in enumerate(eval_metric_list):
                    if metric in {'logloss', 'multi_logloss'}:
                        eval_metric_list[index] = 'binary_logloss'
                    elif metric in {'error', 'multi_error'}:
                        eval_metric_list[index] = 'binary_error'
            eval_metric = eval_metric_list

        # do not modify args, as it causes errors in model selection tools
        valid_sets: Optional[List[Tuple]] = None
        if eval_set is not None:
            if isinstance(eval_set, tuple):
                eval_set = [eval_set]
            valid_sets = []
            for valid_x, valid_y in eval_set:
                if valid_x is X and valid_y is y:
                    valid_sets.append((valid_x, _y))
                else:
                    valid_sets.append((valid_x, self._le.transform(valid_y)))

        super().fit(
            X,
            _y,
            sample_weight=sample_weight,
            init_score=init_score,
            eval_set=valid_sets,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_class_weight=eval_class_weight,
            eval_init_score=eval_init_score,
            eval_metric=eval_metric,
            feature_name=feature_name,
            categorical_feature=categorical_feature,
            callbacks=callbacks,
            init_model=init_model
        )
        return self

    _base_doc = LGBMModel.fit.__doc__.replace("self : LGBMModel", "self : LGBMClassifier")  # type: ignore
    _base_doc = (_base_doc[:_base_doc.find('group :')]  # type: ignore
                 + _base_doc[_base_doc.find('eval_set :'):])  # type: ignore
    fit.__doc__ = (_base_doc[:_base_doc.find('eval_group :')]
                   + _base_doc[_base_doc.find('eval_metric :'):])
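    # A sketch of the metric aliasing performed in ``fit`` above (``X3``/``y3``
    # are hypothetical toy data): string metrics are remapped to the detected
    # task, so 'logloss' trains as 'multi_logloss' for a 3-class target and as
    # 'binary_logloss' for a binary one.
    #
    #   >>> clf = LGBMClassifier(n_estimators=5)
    #   >>> clf = clf.fit(X3, y3, eval_set=[(X3, y3)], eval_metric='logloss')
    #   >>> 'multi_logloss' in clf.best_score_['valid_0']   # y3 has 3 classes
    #   True
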
    def predict(
        self,
        X: _LGBM_ScikitMatrixLike,
        raw_score: bool = False,
        start_iteration: int = 0,
        num_iteration: Optional[int] = None,
        pred_leaf: bool = False,
        pred_contrib: bool = False,
        validate_features: bool = False,
        **kwargs: Any
    ):
        """Docstring is inherited from the LGBMModel."""
        result = self.predict_proba(
            X=X,
            raw_score=raw_score,
            start_iteration=start_iteration,
            num_iteration=num_iteration,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            validate_features=validate_features,
            **kwargs
        )
        if callable(self._objective) or raw_score or pred_leaf or pred_contrib:
            return result
        else:
            class_index = np.argmax(result, axis=1)
            return self._le.inverse_transform(class_index)
    predict.__doc__ = LGBMModel.predict.__doc__

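    # A sketch of the decoding step above (``X`` is hypothetical toy data):
    # with a built-in objective, the argmax over class probabilities is mapped
    # back to the original labels through the fitted label encoder.
    #
    #   >>> clf = LGBMClassifier(n_estimators=5).fit(X, ['cat', 'dog', 'cat', 'dog'])
    #   >>> clf.predict(X)                  # original string labels, not indices
    #   >>> clf.predict(X, raw_score=True)  # raw scores skip the decoding entirely
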
    def predict_proba(
        self,
        X: _LGBM_ScikitMatrixLike,
        raw_score: bool = False,
        start_iteration: int = 0,
        num_iteration: Optional[int] = None,
        pred_leaf: bool = False,
        pred_contrib: bool = False,
        validate_features: bool = False,
        **kwargs: Any
    ):
        """Docstring is set after definition, using a template."""
        result = super().predict(
            X=X,
            raw_score=raw_score,
            start_iteration=start_iteration,
            num_iteration=num_iteration,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            validate_features=validate_features,
            **kwargs
        )
        if callable(self._objective) and not (raw_score or pred_leaf or pred_contrib):
            _log_warning("Cannot compute class probabilities or labels "
                         "due to the usage of customized objective function.\n"
                         "Returning raw scores instead.")
            return result
        elif self._n_classes > 2 or raw_score or pred_leaf or pred_contrib:  # type: ignore[operator]
            return result
        else:
            return np.vstack((1. - result, result)).transpose()
    predict_proba.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted probability for each class for each sample.",
        X_shape="numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse, list of lists of int or float of shape = [n_samples, n_features]",
        output_name="predicted_probability",
        predicted_result_shape="array-like of shape = [n_samples] or shape = [n_samples, n_classes]",
        X_leaves_shape="array-like of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]",
        X_SHAP_values_shape="array-like of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes] or list with n_classes length of such objects"
    )
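
    # A shape sketch (``clf``/``X`` are hypothetical, as in the sketches above):
    # for binary tasks the Booster returns only the positive-class probability,
    # so ``predict_proba`` stacks ``1 - p`` and ``p`` into two sklearn-style
    # columns.
    #
    #   >>> proba = clf.predict_proba(X)
    #   >>> proba.shape                  # (n_samples, 2) for binary classification
    #   >>> proba.sum(axis=1)            # each row sums to 1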

    @property
    def classes_(self) -> np.ndarray:
        """:obj:`array` of shape = [n_classes]: The class label array."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No classes found. Need to call fit beforehand.')
        return self._classes  # type: ignore[return-value]

    @property
    def n_classes_(self) -> int:
        """:obj:`int`: The number of classes."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No classes found. Need to call fit beforehand.')
        return self._n_classes

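# A property sketch (toy data; illustrative only): ``classes_`` preserves the
# original label values in the label encoder's sorted order, and ``n_classes_``
# is simply their count.
#
#   >>> clf = LGBMClassifier(n_estimators=5).fit([[0.0], [1.0], [2.0], [3.0]],
#   ...                                          ['a', 'b', 'a', 'b'])
#   >>> list(clf.classes_)
#   ['a', 'b']
#   >>> clf.n_classes_
#   2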

class LGBMRanker(LGBMModel):
    """LightGBM ranker.

    .. warning::

        scikit-learn doesn't support ranking applications yet,
        so this class is not fully compatible with the sklearn ecosystem.
        Please use this class mainly for training and applying ranking models in the common sklearn-like way.
    """

    def fit(  # type: ignore[override]
        self,
        X: _LGBM_ScikitMatrixLike,
        y: _LGBM_LabelType,
        sample_weight=None,
        init_score=None,
        group: Optional[_LGBM_GroupType] = None,
        eval_set=None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight=None,
        eval_init_score=None,
        eval_group=None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        eval_at: Union[List[int], Tuple[int, ...]] = (1, 2, 3, 4, 5),
        feature_name: _LGBM_FeatureNameConfiguration = 'auto',
        categorical_feature: _LGBM_CategoricalFeatureConfiguration = 'auto',
        callbacks: Optional[List[Callable]] = None,
        init_model: Optional[Union[str, Path, Booster, LGBMModel]] = None
    ) -> "LGBMRanker":
        """Docstring is inherited from the LGBMModel."""
        # check group data
        if group is None:
            raise ValueError("Should set group for ranking task")

        if eval_set is not None:
            if eval_group is None:
                raise ValueError("Eval_group cannot be None when eval_set is not None")
            elif len(eval_group) != len(eval_set):
                raise ValueError("Length of eval_group should be equal to length of eval_set")
            elif (isinstance(eval_group, dict)
                  and any(i not in eval_group or eval_group[i] is None for i in range(len(eval_group)))
                  or isinstance(eval_group, list)
                  and any(group is None for group in eval_group)):
                raise ValueError("Should set group for all eval datasets for ranking task; "
                                 "if you use dict, the index should start from 0")

        self._eval_at = eval_at
        super().fit(
            X,
            y,
            sample_weight=sample_weight,
            init_score=init_score,
            group=group,
            eval_set=eval_set,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_init_score=eval_init_score,
            eval_group=eval_group,
            eval_metric=eval_metric,
            feature_name=feature_name,
            categorical_feature=categorical_feature,
            callbacks=callbacks,
            init_model=init_model
        )
        return self

    _base_doc = LGBMModel.fit.__doc__.replace("self : LGBMModel", "self : LGBMRanker")  # type: ignore
    fit.__doc__ = (_base_doc[:_base_doc.find('eval_class_weight :')]  # type: ignore
                   + _base_doc[_base_doc.find('eval_init_score :'):])  # type: ignore
    _base_doc = fit.__doc__
    _before_feature_name, _feature_name, _after_feature_name = _base_doc.partition('feature_name :')
    fit.__doc__ = f"""{_before_feature_name}eval_at : list or tuple of int, optional (default=(1, 2, 3, 4, 5))
        The evaluation positions of the specified metric.
    {_feature_name}{_after_feature_name}"""
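

# A minimal ranking sketch (toy data; illustrative only): ``group`` lists the
# query sizes in document order, and each entry of ``eval_set`` needs a matching
# entry in ``eval_group``.
#
#   >>> X = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
#   >>> y = [0, 1, 0, 1, 0, 1]              # graded relevance per document
#   >>> ranker = LGBMRanker(n_estimators=5)
#   >>> ranker = ranker.fit(X, y, group=[3, 3],          # two queries of 3 docs
#   ...                     eval_set=[(X, y)], eval_group=[[3, 3]], eval_at=[1, 2])
#   >>> ranker.predict(X)                   # per-document relevance scores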