# coding: utf-8
"""Scikit-learn wrapper interface for LightGBM."""

import copy
from inspect import signature
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import numpy as np
import scipy.sparse

from .basic import (
    Booster,
    Dataset,
    LightGBMError,
    _choose_param_value,
    _ConfigAliases,
    _LGBM_BoosterBestScoreType,
    _LGBM_CategoricalFeatureConfiguration,
    _LGBM_EvalFunctionResultType,
    _LGBM_FeatureNameConfiguration,
    _LGBM_GroupType,
    _LGBM_InitScoreType,
    _LGBM_LabelType,
    _LGBM_WeightType,
    _log_warning,
)
from .callback import _EvalResultDict, record_evaluation
from .compat import (
    SKLEARN_INSTALLED,
    LGBMNotFittedError,
    _LGBMAssertAllFinite,
    _LGBMCheckArray,
    _LGBMCheckClassificationTargets,
    _LGBMCheckSampleWeight,
    _LGBMCheckXY,
    _LGBMClassifierBase,
    _LGBMComputeSampleWeight,
    _LGBMCpuCount,
    _LGBMLabelEncoder,
    _LGBMModelBase,
    _LGBMRegressorBase,
    dt_DataTable,
    np_random_Generator,
    pd_DataFrame,
)
from .engine import train

__all__ = [
    "LGBMClassifier",
    "LGBMModel",
    "LGBMRanker",
    "LGBMRegressor",
]


_LGBM_ScikitMatrixLike = Union[
    dt_DataTable,
    List[Union[List[float], List[int]]],
    np.ndarray,
    pd_DataFrame,
    scipy.sparse.spmatrix,
]
_LGBM_ScikitCustomObjectiveFunction = Union[
    # f(labels, preds)
    Callable[
        [Optional[np.ndarray], np.ndarray],
        Tuple[np.ndarray, np.ndarray],
    ],
    # f(labels, preds, weights)
    Callable[
        [Optional[np.ndarray], np.ndarray, Optional[np.ndarray]],
        Tuple[np.ndarray, np.ndarray],
    ],
    # f(labels, preds, weights, group)
    Callable[
        [Optional[np.ndarray], np.ndarray, Optional[np.ndarray], Optional[np.ndarray]],
        Tuple[np.ndarray, np.ndarray],
    ],
]
_LGBM_ScikitCustomEvalFunction = Union[
    # f(labels, preds)
    Callable[
        [Optional[np.ndarray], np.ndarray],
        _LGBM_EvalFunctionResultType,
    ],
    Callable[
        [Optional[np.ndarray], np.ndarray],
        List[_LGBM_EvalFunctionResultType],
    ],
    # f(labels, preds, weights)
    Callable[
        [Optional[np.ndarray], np.ndarray, Optional[np.ndarray]],
        _LGBM_EvalFunctionResultType,
    ],
    Callable[
        [Optional[np.ndarray], np.ndarray, Optional[np.ndarray]],
        List[_LGBM_EvalFunctionResultType],
    ],
    # f(labels, preds, weights, group)
    Callable[
        [Optional[np.ndarray], np.ndarray, Optional[np.ndarray], Optional[np.ndarray]],
        _LGBM_EvalFunctionResultType,
    ],
    Callable[
        [Optional[np.ndarray], np.ndarray, Optional[np.ndarray], Optional[np.ndarray]],
        List[_LGBM_EvalFunctionResultType],
    ],
]
_LGBM_ScikitEvalMetricType = Union[
    str,
    _LGBM_ScikitCustomEvalFunction,
    List[Union[str, _LGBM_ScikitCustomEvalFunction]],
]
_LGBM_ScikitValidSet = Tuple[_LGBM_ScikitMatrixLike, _LGBM_LabelType]


def _get_group_from_constructed_dataset(dataset: Dataset) -> Optional[np.ndarray]:
    group = dataset.get_group()
    error_msg = (
        "Estimators in lightgbm.sklearn should only retrieve query groups from a constructed Dataset. "
        "If you're seeing this message, it's a bug in lightgbm. Please report it at https://github.com/microsoft/LightGBM/issues."
    )
    assert group is None or isinstance(group, np.ndarray), error_msg
    return group


def _get_label_from_constructed_dataset(dataset: Dataset) -> np.ndarray:
    label = dataset.get_label()
    error_msg = (
        "Estimators in lightgbm.sklearn should only retrieve labels from a constructed Dataset. "
        "If you're seeing this message, it's a bug in lightgbm. Please report it at https://github.com/microsoft/LightGBM/issues."
    )
    assert isinstance(label, np.ndarray), error_msg
    return label


def _get_weight_from_constructed_dataset(dataset: Dataset) -> Optional[np.ndarray]:
    weight = dataset.get_weight()
    error_msg = (
        "Estimators in lightgbm.sklearn should only retrieve weights from a constructed Dataset. "
        "If you're seeing this message, it's a bug in lightgbm. Please report it at https://github.com/microsoft/LightGBM/issues."
    )
    assert weight is None or isinstance(weight, np.ndarray), error_msg
    return weight


class _ObjectiveFunctionWrapper:
    """Proxy class for objective function."""

    def __init__(self, func: _LGBM_ScikitCustomObjectiveFunction):
        """Construct a proxy class.

        This class transforms an objective function into the form with signature ``new_func(preds, dataset)``
        expected by ``lightgbm.engine.train``.

        Parameters
        ----------
        func : callable
            Expects a callable with the following signatures:
            ``func(y_true, y_pred)``,
            ``func(y_true, y_pred, weight)``
            or ``func(y_true, y_pred, weight, group)``
            and returns (grad, hess):

                y_true : numpy 1-D array of shape = [n_samples]
                    The target values.
                y_pred : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                    The predicted values.
                    Predicted values are returned before any transformation,
                    e.g. they are raw margin instead of probability of positive class for binary task.
                weight : numpy 1-D array of shape = [n_samples]
                    The weight of samples. Weights should be non-negative.
                group : numpy 1-D array
                    Group/query data.
                    Only used in the learning-to-rank task.
                    sum(group) = n_samples.
                    For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
                    where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
                grad : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                    The value of the first order derivative (gradient) of the loss
                    with respect to the elements of y_pred for each sample point.
                hess : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                    The value of the second order derivative (Hessian) of the loss
                    with respect to the elements of y_pred for each sample point.

        .. note::

            For multi-class task, y_pred is a numpy 2-D array of shape = [n_samples, n_classes],
            and grad and hess should be returned in the same format.
        """
        self.func = func

    def __call__(
        self,
        preds: np.ndarray,
        dataset: Dataset,
    ) -> Tuple[np.ndarray, np.ndarray]:
        """Call passed function with appropriate arguments.

        Parameters
        ----------
        preds : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The predicted values.
        dataset : Dataset
            The training dataset.

        Returns
        -------
        grad : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The value of the first order derivative (gradient) of the loss
            with respect to the elements of preds for each sample point.
        hess : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The value of the second order derivative (Hessian) of the loss
            with respect to the elements of preds for each sample point.
        """
        labels = _get_label_from_constructed_dataset(dataset)
        argc = len(signature(self.func).parameters)
        if argc == 2:
            grad, hess = self.func(labels, preds)  # type: ignore[call-arg]
            return grad, hess

        weight = _get_weight_from_constructed_dataset(dataset)
        if argc == 3:
            grad, hess = self.func(labels, preds, weight)  # type: ignore[call-arg]
            return grad, hess

        if argc == 4:
            group = _get_group_from_constructed_dataset(dataset)
            return self.func(labels, preds, weight, group)  # type: ignore[call-arg]

        raise TypeError(f"Self-defined objective function should have 2, 3 or 4 arguments, got {argc}")
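
# A minimal sketch (editor's addition, not part of the library) of a custom
# objective in the two-argument form ``func(y_true, y_pred)`` accepted by
# _ObjectiveFunctionWrapper; it returns ``(grad, hess)`` for a plain
# squared-error loss. The name ``_example_l2_objective`` is hypothetical.
#
#     def _example_l2_objective(y_true, y_pred):
#         grad = y_pred - y_true       # d/dy_pred of 0.5 * (y_pred - y_true) ** 2
#         hess = np.ones_like(y_true)  # second derivative is constant
#         return grad, hess
#
# Passed as ``objective=_example_l2_objective``, the wrapper above calls it
# with labels (and, for the 3- and 4-argument variants, weights and group)
# taken from the constructed Dataset.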


class _EvalFunctionWrapper:
    """Proxy class for evaluation function."""

    def __init__(self, func: _LGBM_ScikitCustomEvalFunction):
        """Construct a proxy class.

        This class transforms an evaluation function into the form with signature ``new_func(preds, dataset)``
        expected by ``lightgbm.engine.train``.

        Parameters
        ----------
        func : callable
            Expects a callable with the following signatures:
            ``func(y_true, y_pred)``,
            ``func(y_true, y_pred, weight)``
            or ``func(y_true, y_pred, weight, group)``
            and returns (eval_name, eval_result, is_higher_better) or
            list of (eval_name, eval_result, is_higher_better):

                y_true : numpy 1-D array of shape = [n_samples]
                    The target values.
                y_pred : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                    The predicted values.
                    In case of custom ``objective``, predicted values are returned before any transformation,
                    e.g. they are raw margin instead of probability of positive class for binary task in this case.
                weight : numpy 1-D array of shape = [n_samples]
                    The weight of samples. Weights should be non-negative.
                group : numpy 1-D array
                    Group/query data.
                    Only used in the learning-to-rank task.
                    sum(group) = n_samples.
                    For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
                    where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
                eval_name : str
                    The name of evaluation function (without whitespace).
                eval_result : float
                    The eval result.
                is_higher_better : bool
                    Is eval result higher better, e.g. AUC is ``is_higher_better``.
        """
        self.func = func

    def __call__(
        self,
        preds: np.ndarray,
        dataset: Dataset,
    ) -> Union[_LGBM_EvalFunctionResultType, List[_LGBM_EvalFunctionResultType]]:
        """Call passed function with appropriate arguments.

        Parameters
        ----------
        preds : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The predicted values.
        dataset : Dataset
            The training dataset.

        Returns
        -------
        eval_name : str
            The name of evaluation function (without whitespace).
        eval_result : float
            The eval result.
        is_higher_better : bool
            Is eval result higher better, e.g. AUC is ``is_higher_better``.
        """
        labels = _get_label_from_constructed_dataset(dataset)
        argc = len(signature(self.func).parameters)
        if argc == 2:
            return self.func(labels, preds)  # type: ignore[call-arg]

        weight = _get_weight_from_constructed_dataset(dataset)
        if argc == 3:
            return self.func(labels, preds, weight)  # type: ignore[call-arg]

        if argc == 4:
            group = _get_group_from_constructed_dataset(dataset)
            return self.func(labels, preds, weight, group)  # type: ignore[call-arg]

        raise TypeError(f"Self-defined eval function should have 2, 3 or 4 arguments, got {argc}")
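
# A minimal sketch (editor's addition, not part of the library) of a custom
# evaluation function in the two-argument form accepted by _EvalFunctionWrapper;
# it returns the ``(eval_name, eval_result, is_higher_better)`` triple described
# above. The name ``_example_rmse`` is hypothetical.
#
#     def _example_rmse(y_true, y_pred):
#         rmse = float(np.sqrt(np.mean((y_pred - y_true) ** 2)))
#         return "example_rmse", rmse, False  # lower is better
#
# Such a callable can be passed through ``eval_metric`` in the ``fit`` methods
# below.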


# documentation templates for LGBMModel methods are shared between the classes in
# this module and those in the ``dask`` module

_lgbmmodel_doc_fit = """
    Build a gradient boosting model from the training set (X, y).

    Parameters
    ----------
    X : {X_shape}
        Input feature matrix.
    y : {y_shape}
        The target values (class labels in classification, real numbers in regression).
    sample_weight : {sample_weight_shape}
        Weights of training data. Weights should be non-negative.
    init_score : {init_score_shape}
        Init score of training data.
    group : {group_shape}
        Group/query data.
        Only used in the learning-to-rank task.
        sum(group) = n_samples.
        For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
        where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
    eval_set : list or None, optional (default=None)
        A list of (X, y) tuple pairs to use as validation sets.
    eval_names : list of str, or None, optional (default=None)
        Names of eval_set.
    eval_sample_weight : {eval_sample_weight_shape}
        Weights of eval data. Weights should be non-negative.
    eval_class_weight : list or None, optional (default=None)
        Class weights of eval data.
    eval_init_score : {eval_init_score_shape}
        Init score of eval data.
    eval_group : {eval_group_shape}
        Group data of eval data.
    eval_metric : str, callable, list or None, optional (default=None)
        If str, it should be a built-in evaluation metric to use.
        If callable, it should be a custom evaluation metric, see note below for more details.
        If list, it can be a list of built-in metrics, a list of custom evaluation metrics, or a mix of both.
        In either case, the ``metric`` from the model parameters will be evaluated and used as well.
        Default: 'l2' for LGBMRegressor, 'logloss' for LGBMClassifier, 'ndcg' for LGBMRanker.
    feature_name : list of str, or 'auto', optional (default='auto')
        Feature names.
        If 'auto' and data is pandas DataFrame, data column names are used.
    categorical_feature : list of str or int, or 'auto', optional (default='auto')
        Categorical features.
        If list of int, interpreted as indices.
        If list of str, interpreted as feature names (need to specify ``feature_name`` as well).
        If 'auto' and data is pandas DataFrame, pandas unordered categorical columns are used.
        All values in categorical features will be cast to int32 and thus should be less than int32 max value (2147483647).
        Large values could be memory consuming. Consider using consecutive integers starting from zero.
        All negative values in categorical features will be treated as missing values.
        The output cannot be monotonically constrained with respect to a categorical feature.
        Floating point numbers in categorical features will be rounded towards 0.
    callbacks : list of callable, or None, optional (default=None)
        List of callback functions that are applied at each iteration.
        See Callbacks in Python API for more information.
    init_model : str, pathlib.Path, Booster, LGBMModel or None, optional (default=None)
        Filename of LightGBM model, Booster instance or LGBMModel instance used to continue training.

    Returns
    -------
    self : LGBMModel
        Returns self.
    """

_lgbmmodel_doc_custom_eval_note = """
    Note
    ----
    Custom eval function expects a callable with the following signatures:
    ``func(y_true, y_pred)``, ``func(y_true, y_pred, weight)`` or
    ``func(y_true, y_pred, weight, group)``
    and returns (eval_name, eval_result, is_higher_better) or
    list of (eval_name, eval_result, is_higher_better):

        y_true : numpy 1-D array of shape = [n_samples]
            The target values.
        y_pred : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The predicted values.
            In case of custom ``objective``, predicted values are returned before any transformation,
            e.g. they are raw margin instead of probability of positive class for binary task in this case.
        weight : numpy 1-D array of shape = [n_samples]
            The weight of samples. Weights should be non-negative.
        group : numpy 1-D array
            Group/query data.
            Only used in the learning-to-rank task.
            sum(group) = n_samples.
            For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
            where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
        eval_name : str
            The name of evaluation function (without whitespace).
        eval_result : float
            The eval result.
        is_higher_better : bool
            Is eval result higher better, e.g. AUC is ``is_higher_better``.
"""

_lgbmmodel_doc_predict = """
    {description}

    Parameters
    ----------
    X : {X_shape}
        Input feature matrix.
    raw_score : bool, optional (default=False)
        Whether to predict raw scores.
    start_iteration : int, optional (default=0)
        Start index of the iteration to predict.
        If <= 0, starts from the first iteration.
    num_iteration : int or None, optional (default=None)
        Total number of iterations used in the prediction.
        If None, if the best iteration exists and start_iteration <= 0, the best iteration is used;
        otherwise, all iterations from ``start_iteration`` are used (no limits).
        If <= 0, all iterations from ``start_iteration`` are used (no limits).
    pred_leaf : bool, optional (default=False)
        Whether to predict leaf index.
    pred_contrib : bool, optional (default=False)
        Whether to predict feature contributions.

        .. note::

            If you want to get more explanations for your model's predictions using SHAP values,
            like SHAP interaction values,
            you can install the shap package (https://github.com/slundberg/shap).
            Note that unlike the shap package, with ``pred_contrib`` we return a matrix with an extra
            column, where the last column is the expected value.

    validate_features : bool, optional (default=False)
        If True, ensure that the features used to predict match the ones used to train.
        Used only if data is pandas DataFrame.
    **kwargs
        Other parameters for the prediction.

    Returns
    -------
    {output_name} : {predicted_result_shape}
        The predicted values.
    X_leaves : {X_leaves_shape}
        If ``pred_leaf=True``, the predicted leaf of every tree for each sample.
    X_SHAP_values : {X_SHAP_values_shape}
        If ``pred_contrib=True``, the feature contributions for each sample.
    """


class LGBMModel(_LGBMModelBase):
    """Implementation of the scikit-learn API for LightGBM."""

    def __init__(
        self,
        boosting_type: str = "gbdt",
        num_leaves: int = 31,
        max_depth: int = -1,
        learning_rate: float = 0.1,
        n_estimators: int = 100,
        subsample_for_bin: int = 200000,
        objective: Optional[Union[str, _LGBM_ScikitCustomObjectiveFunction]] = None,
        class_weight: Optional[Union[Dict, str]] = None,
        min_split_gain: float = 0.0,
        min_child_weight: float = 1e-3,
        min_child_samples: int = 20,
        subsample: float = 1.0,
        subsample_freq: int = 0,
        colsample_bytree: float = 1.0,
        reg_alpha: float = 0.0,
        reg_lambda: float = 0.0,
        random_state: Optional[Union[int, np.random.RandomState, "np.random.Generator"]] = None,
        n_jobs: Optional[int] = None,
        importance_type: str = "split",
        **kwargs,
    ):
        r"""Construct a gradient boosting model.

        Parameters
        ----------
        boosting_type : str, optional (default='gbdt')
            'gbdt', traditional Gradient Boosting Decision Tree.
            'dart', Dropouts meet Multiple Additive Regression Trees.
            'rf', Random Forest.
        num_leaves : int, optional (default=31)
            Maximum tree leaves for base learners.
        max_depth : int, optional (default=-1)
            Maximum tree depth for base learners, <=0 means no limit.
        learning_rate : float, optional (default=0.1)
            Boosting learning rate.
            You can use ``callbacks`` parameter of ``fit`` method to shrink/adapt learning rate
            in training using ``reset_parameter`` callback.
            Note that this will ignore the ``learning_rate`` argument in training.
        n_estimators : int, optional (default=100)
            Number of boosted trees to fit.
        subsample_for_bin : int, optional (default=200000)
            Number of samples for constructing bins.
        objective : str, callable or None, optional (default=None)
            Specify the learning task and the corresponding learning objective or
            a custom objective function to be used (see note below).
            Default: 'regression' for LGBMRegressor, 'binary' or 'multiclass' for LGBMClassifier, 'lambdarank' for LGBMRanker.
        class_weight : dict, 'balanced' or None, optional (default=None)
            Weights associated with classes in the form ``{class_label: weight}``.
            Use this parameter only for multi-class classification task;
            for binary classification task you may use ``is_unbalance`` or ``scale_pos_weight`` parameters.
            Note that the usage of all these parameters will result in poor estimates of the individual class probabilities.
            You may want to consider performing probability calibration
            (https://scikit-learn.org/stable/modules/calibration.html) of your model.
            The 'balanced' mode uses the values of y to automatically adjust weights
            inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))``.
            If None, all classes are supposed to have weight one.
            Note that these weights will be multiplied with ``sample_weight`` (passed through the ``fit`` method)
            if ``sample_weight`` is specified.
        min_split_gain : float, optional (default=0.)
            Minimum loss reduction required to make a further partition on a leaf node of the tree.
        min_child_weight : float, optional (default=1e-3)
            Minimum sum of instance weight (Hessian) needed in a child (leaf).
        min_child_samples : int, optional (default=20)
            Minimum number of data needed in a child (leaf).
        subsample : float, optional (default=1.)
            Subsample ratio of the training instance.
        subsample_freq : int, optional (default=0)
            Frequency of subsampling; <=0 disables subsampling.
        colsample_bytree : float, optional (default=1.)
            Subsample ratio of columns when constructing each tree.
        reg_alpha : float, optional (default=0.)
            L1 regularization term on weights.
        reg_lambda : float, optional (default=0.)
            L2 regularization term on weights.
        random_state : int, RandomState object or None, optional (default=None)
            Random number seed.
            If int, this number is used to seed the C++ code.
            If RandomState or Generator object (numpy), a random integer is picked based on its state to seed the C++ code.
            If None, default seeds in C++ code are used.
        n_jobs : int or None, optional (default=None)
            Number of parallel threads to use for training (can be changed at prediction time by
            passing it as an extra keyword argument).

            For better performance, it is recommended to set this to the number of physical cores
            in the CPU.

            Negative integers are interpreted as following joblib's formula (n_cpus + 1 + n_jobs), just like
            scikit-learn (so e.g. -1 means using all threads). A value of zero corresponds to the default number of
            threads configured for OpenMP in the system. A value of ``None`` (the default) corresponds
            to using the number of physical cores in the system (its correct detection requires
            either the ``joblib`` or the ``psutil`` util libraries to be installed).

            .. versionchanged:: 4.0.0

        importance_type : str, optional (default='split')
            The type of feature importance to be filled into ``feature_importances_``.
            If 'split', result contains numbers of times the feature is used in a model.
            If 'gain', result contains total gains of splits which use the feature.
        **kwargs
            Other parameters for the model.
            Check http://lightgbm.readthedocs.io/en/latest/Parameters.html for more parameters.

            .. warning::

                \*\*kwargs is not supported in sklearn, it may cause unexpected issues.

        Note
        ----
        A custom objective function can be provided for the ``objective`` parameter.
        In this case, it should have the signature
        ``objective(y_true, y_pred) -> grad, hess``,
        ``objective(y_true, y_pred, weight) -> grad, hess``
        or ``objective(y_true, y_pred, weight, group) -> grad, hess``:

            y_true : numpy 1-D array of shape = [n_samples]
                The target values.
            y_pred : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                The predicted values.
                Predicted values are returned before any transformation,
                e.g. they are raw margin instead of probability of positive class for binary task.
            weight : numpy 1-D array of shape = [n_samples]
                The weight of samples. Weights should be non-negative.
            group : numpy 1-D array
                Group/query data.
                Only used in the learning-to-rank task.
                sum(group) = n_samples.
                For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
                where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
            grad : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                The value of the first order derivative (gradient) of the loss
                with respect to the elements of y_pred for each sample point.
            hess : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                The value of the second order derivative (Hessian) of the loss
                with respect to the elements of y_pred for each sample point.

        For multi-class task, y_pred is a numpy 2-D array of shape = [n_samples, n_classes],
        and grad and hess should be returned in the same format.
        """
        if not SKLEARN_INSTALLED:
            raise LightGBMError(
                "scikit-learn is required for lightgbm.sklearn. "
                "You must install scikit-learn and restart your session to use this module."
            )

        self.boosting_type = boosting_type
        self.objective = objective
        self.num_leaves = num_leaves
        self.max_depth = max_depth
        self.learning_rate = learning_rate
        self.n_estimators = n_estimators
        self.subsample_for_bin = subsample_for_bin
        self.min_split_gain = min_split_gain
        self.min_child_weight = min_child_weight
        self.min_child_samples = min_child_samples
        self.subsample = subsample
        self.subsample_freq = subsample_freq
        self.colsample_bytree = colsample_bytree
        self.reg_alpha = reg_alpha
        self.reg_lambda = reg_lambda
        self.random_state = random_state
        self.n_jobs = n_jobs
        self.importance_type = importance_type
        self._Booster: Optional[Booster] = None
        self._evals_result: _EvalResultDict = {}
        self._best_score: _LGBM_BoosterBestScoreType = {}
        self._best_iteration: int = -1
        self._other_params: Dict[str, Any] = {}
        self._objective = objective
        self.class_weight = class_weight
        self._class_weight: Optional[Union[Dict, str]] = None
        self._class_map: Optional[Dict[int, int]] = None
        self._n_features: int = -1
        self._n_features_in: int = -1
        self._classes: Optional[np.ndarray] = None
        self._n_classes: int = -1
        self.set_params(**kwargs)

    def _more_tags(self) -> Dict[str, Any]:
        return {
            "allow_nan": True,
            "X_types": ["2darray", "sparse", "1dlabels"],
            "_xfail_checks": {
                "check_no_attributes_set_in_init": "scikit-learn incorrectly asserts that private attributes "
                "cannot be set in __init__: "
                "(see https://github.com/microsoft/LightGBM/issues/2628)"
            },
        }

    def __sklearn_is_fitted__(self) -> bool:
        return getattr(self, "fitted_", False)

    def get_params(self, deep: bool = True) -> Dict[str, Any]:
        """Get parameters for this estimator.

        Parameters
        ----------
        deep : bool, optional (default=True)
            If True, will return the parameters for this estimator and
            contained subobjects that are estimators.

        Returns
        -------
        params : dict
            Parameter names mapped to their values.
        """
        params = super().get_params(deep=deep)
        params.update(self._other_params)
        return params

    def set_params(self, **params: Any) -> "LGBMModel":
        """Set the parameters of this estimator.

        Parameters
        ----------
        **params
            Parameter names with their new values.

        Returns
        -------
        self : object
            Returns self.
        """
        for key, value in params.items():
            setattr(self, key, value)
            if hasattr(self, f"_{key}"):
                setattr(self, f"_{key}", value)
            self._other_params[key] = value
        return self
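
    # Example (editor's sketch, not part of the library): since ``set_params``
    # mirrors values into matching private attributes and records every key in
    # ``_other_params``, a call like
    #
    #     model = LGBMModel()
    #     model.set_params(objective="binary", min_data_in_leaf=10)
    #
    # updates ``model.objective`` and ``model._objective``, and ensures that the
    # parameter ``min_data_in_leaf`` reaches the Booster through ``get_params``.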

    def _process_params(self, stage: str) -> Dict[str, Any]:
        """Process the parameters of this estimator based on its type, parameter aliases, etc.

        Parameters
        ----------
        stage : str
            Name of the stage (can be ``fit`` or ``predict``) this method is called from.

        Returns
        -------
        processed_params : dict
            Processed parameter names mapped to their values.
        """
        assert stage in {"fit", "predict"}
        params = self.get_params()

        params.pop("objective", None)
        for alias in _ConfigAliases.get("objective"):
            if alias in params:
                obj = params.pop(alias)
                _log_warning(f"Found '{alias}' in params. Will use it instead of 'objective' argument")
                if stage == "fit":
                    self._objective = obj
        if stage == "fit":
            if self._objective is None:
                if isinstance(self, LGBMRegressor):
                    self._objective = "regression"
                elif isinstance(self, LGBMClassifier):
                    if self._n_classes > 2:
                        self._objective = "multiclass"
                    else:
                        self._objective = "binary"
                elif isinstance(self, LGBMRanker):
                    self._objective = "lambdarank"
                else:
                    raise ValueError("Unknown LGBMModel type.")
        if callable(self._objective):
            if stage == "fit":
                params["objective"] = _ObjectiveFunctionWrapper(self._objective)
            else:
                params["objective"] = "None"
        else:
            params["objective"] = self._objective

        params.pop("importance_type", None)
        params.pop("n_estimators", None)
        params.pop("class_weight", None)

        if isinstance(params["random_state"], np.random.RandomState):
            params["random_state"] = params["random_state"].randint(np.iinfo(np.int32).max)
        elif isinstance(params["random_state"], np_random_Generator):
            params["random_state"] = int(params["random_state"].integers(np.iinfo(np.int32).max))
        if self._n_classes > 2:
            for alias in _ConfigAliases.get("num_class"):
                params.pop(alias, None)
            params["num_class"] = self._n_classes
        if hasattr(self, "_eval_at"):
            eval_at = self._eval_at
            for alias in _ConfigAliases.get("eval_at"):
                if alias in params:
                    _log_warning(f"Found '{alias}' in params. Will use it instead of 'eval_at' argument")
                    eval_at = params.pop(alias)
            params["eval_at"] = eval_at

        # register default metric for consistency with callable eval_metric case
        original_metric = self._objective if isinstance(self._objective, str) else None
        if original_metric is None:
            # try to deduce from class instance
            if isinstance(self, LGBMRegressor):
                original_metric = "l2"
            elif isinstance(self, LGBMClassifier):
                original_metric = "multi_logloss" if self._n_classes > 2 else "binary_logloss"
            elif isinstance(self, LGBMRanker):
                original_metric = "ndcg"

        # overwrite the default metric with any explicitly set metric
        params = _choose_param_value("metric", params, original_metric)

        # use joblib conventions for negative n_jobs, just like scikit-learn
        # at predict time, this is handled later due to the order of parameter updates
        if stage == "fit":
            params = _choose_param_value("num_threads", params, self.n_jobs)
            params["num_threads"] = self._process_n_jobs(params["num_threads"])

        return params

    def _process_n_jobs(self, n_jobs: Optional[int]) -> int:
        """Convert special values of n_jobs to their actual values according to the formulas that apply.

        Parameters
        ----------
        n_jobs : int or None
            The original value of n_jobs, potentially having special values such as 'None' or
            negative integers.

        Returns
        -------
        n_jobs : int
            The value of n_jobs with special values converted to actual number of threads.
        """
        if n_jobs is None:
            n_jobs = _LGBMCpuCount(only_physical_cores=True)
        elif n_jobs < 0:
            n_jobs = max(_LGBMCpuCount(only_physical_cores=False) + 1 + n_jobs, 1)
        return n_jobs
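
    # Worked examples (editor's addition) of the conversion above, assuming a
    # machine with 8 logical and 4 physical cores:
    #   n_jobs=None -> 4  (physical cores; detection needs joblib or psutil)
    #   n_jobs=-1   -> max(8 + 1 - 1, 1) = 8  (all logical cores)
    #   n_jobs=-2   -> max(8 + 1 - 2, 1) = 7  (all but one)
    #   n_jobs=3    -> 3  (positive values pass through unchanged)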

    def fit(
        self,
        X: _LGBM_ScikitMatrixLike,
        y: _LGBM_LabelType,
        sample_weight: Optional[_LGBM_WeightType] = None,
        init_score: Optional[_LGBM_InitScoreType] = None,
        group: Optional[_LGBM_GroupType] = None,
        eval_set: Optional[List[_LGBM_ScikitValidSet]] = None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight: Optional[List[_LGBM_WeightType]] = None,
        eval_class_weight: Optional[List[float]] = None,
        eval_init_score: Optional[List[_LGBM_InitScoreType]] = None,
        eval_group: Optional[List[_LGBM_GroupType]] = None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        feature_name: _LGBM_FeatureNameConfiguration = "auto",
        categorical_feature: _LGBM_CategoricalFeatureConfiguration = "auto",
        callbacks: Optional[List[Callable]] = None,
        init_model: Optional[Union[str, Path, Booster, "LGBMModel"]] = None,
    ) -> "LGBMModel":
        """Docstring is set after definition, using a template."""
        params = self._process_params(stage="fit")

        # Do not modify original args in fit function
        # Refer to https://github.com/microsoft/LightGBM/pull/2619
        eval_metric_list: List[Union[str, _LGBM_ScikitCustomEvalFunction]]
        if eval_metric is None:
            eval_metric_list = []
        elif isinstance(eval_metric, list):
            eval_metric_list = copy.deepcopy(eval_metric)
        else:
            eval_metric_list = [copy.deepcopy(eval_metric)]

        # Separate built-in from callable evaluation metrics
        eval_metrics_callable = [_EvalFunctionWrapper(f) for f in eval_metric_list if callable(f)]
        eval_metrics_builtin = [m for m in eval_metric_list if isinstance(m, str)]

        # concatenate metric from params (or default if not provided in params) and eval_metric
        params["metric"] = [params["metric"]] if isinstance(params["metric"], (str, type(None))) else params["metric"]
        params["metric"] = [e for e in eval_metrics_builtin if e not in params["metric"]] + params["metric"]
        params["metric"] = [metric for metric in params["metric"] if metric is not None]

        if not isinstance(X, (pd_DataFrame, dt_DataTable)):
            _X, _y = _LGBMCheckXY(X, y, accept_sparse=True, force_all_finite=False, ensure_min_samples=2)
            if sample_weight is not None:
                sample_weight = _LGBMCheckSampleWeight(sample_weight, _X)
        else:
            _X, _y = X, y

        if self._class_weight is None:
            self._class_weight = self.class_weight
        if self._class_weight is not None:
            class_sample_weight = _LGBMComputeSampleWeight(self._class_weight, y)
            if sample_weight is None or len(sample_weight) == 0:
                sample_weight = class_sample_weight
            else:
                sample_weight = np.multiply(sample_weight, class_sample_weight)

        self._n_features = _X.shape[1]
        # copy for consistency
        self._n_features_in = self._n_features

        train_set = Dataset(
            data=_X,
            label=_y,
            weight=sample_weight,
            group=group,
            init_score=init_score,
            categorical_feature=categorical_feature,
            params=params,
        )

        valid_sets: List[Dataset] = []
        if eval_set is not None:

            def _get_meta_data(collection, name, i):
                if collection is None:
                    return None
                elif isinstance(collection, list):
                    return collection[i] if len(collection) > i else None
                elif isinstance(collection, dict):
                    return collection.get(i, None)
                else:
                    raise TypeError(f"{name} should be dict or list")

            if isinstance(eval_set, tuple):
                eval_set = [eval_set]
            for i, valid_data in enumerate(eval_set):
                # reuse the training Dataset when a validation set is the training data, to reduce cost
                if valid_data[0] is X and valid_data[1] is y:
                    valid_set = train_set
                else:
                    valid_weight = _get_meta_data(eval_sample_weight, "eval_sample_weight", i)
                    valid_class_weight = _get_meta_data(eval_class_weight, "eval_class_weight", i)
                    if valid_class_weight is not None:
                        if isinstance(valid_class_weight, dict) and self._class_map is not None:
                            valid_class_weight = {self._class_map[k]: v for k, v in valid_class_weight.items()}
                        valid_class_sample_weight = _LGBMComputeSampleWeight(valid_class_weight, valid_data[1])
                        if valid_weight is None or len(valid_weight) == 0:
                            valid_weight = valid_class_sample_weight
                        else:
                            valid_weight = np.multiply(valid_weight, valid_class_sample_weight)
                    valid_init_score = _get_meta_data(eval_init_score, "eval_init_score", i)
                    valid_group = _get_meta_data(eval_group, "eval_group", i)
                    valid_set = Dataset(
                        data=valid_data[0],
                        label=valid_data[1],
                        weight=valid_weight,
                        group=valid_group,
                        init_score=valid_init_score,
                        categorical_feature="auto",
                        params=params,
                    )

                valid_sets.append(valid_set)

        if isinstance(init_model, LGBMModel):
            init_model = init_model.booster_

        if callbacks is None:
            callbacks = []
        else:
            callbacks = copy.copy(callbacks)  # don't use deepcopy here to allow non-serializable objects

        evals_result: _EvalResultDict = {}
        callbacks.append(record_evaluation(evals_result))

        self._Booster = train(
            params=params,
            train_set=train_set,
            num_boost_round=self.n_estimators,
            valid_sets=valid_sets,
            valid_names=eval_names,
            feval=eval_metrics_callable,  # type: ignore[arg-type]
            init_model=init_model,
            feature_name=feature_name,
            callbacks=callbacks,
        )

        self._evals_result = evals_result
        self._best_iteration = self._Booster.best_iteration
        self._best_score = self._Booster.best_score

        self.fitted_ = True

        # free dataset
        self._Booster.free_dataset()
        del train_set, valid_sets
        return self

    fit.__doc__ = (
        _lgbmmodel_doc_fit.format(
            X_shape="numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse, list of lists of int or float of shape = [n_samples, n_features]",
            y_shape="numpy array, pandas DataFrame, pandas Series, list of int or float of shape = [n_samples]",
            sample_weight_shape="numpy array, pandas Series, list of int or float of shape = [n_samples] or None, optional (default=None)",
            init_score_shape="numpy array, pandas DataFrame, pandas Series, list of int or float of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task) or shape = [n_samples, n_classes] (for multi-class task) or None, optional (default=None)",
            group_shape="numpy array, pandas Series, list of int or float, or None, optional (default=None)",
            eval_sample_weight_shape="list of array (same types as ``sample_weight`` supports), or None, optional (default=None)",
            eval_init_score_shape="list of array (same types as ``init_score`` supports), or None, optional (default=None)",
            eval_group_shape="list of array (same types as ``group`` supports), or None, optional (default=None)",
        )
        + "\n\n"
        + _lgbmmodel_doc_custom_eval_note
    )
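
    # Example usage (editor's sketch, not part of the library); ``X_train``,
    # ``y_train``, ``X_valid`` and ``y_valid`` are assumed to be prepared by
    # the caller:
    #
    #     import lightgbm as lgb
    #
    #     model = lgb.LGBMClassifier(n_estimators=500)
    #     model.fit(
    #         X_train,
    #         y_train,
    #         eval_set=[(X_valid, y_valid)],
    #         eval_metric="auc",
    #         callbacks=[lgb.early_stopping(stopping_rounds=50)],
    #     )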

    def predict(
        self,
        X: _LGBM_ScikitMatrixLike,
        raw_score: bool = False,
        start_iteration: int = 0,
        num_iteration: Optional[int] = None,
        pred_leaf: bool = False,
        pred_contrib: bool = False,
        validate_features: bool = False,
        **kwargs: Any,
    ):
        """Docstring is set after definition, using a template."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("Estimator not fitted, call fit before exploiting the model.")
        if not isinstance(X, (pd_DataFrame, dt_DataTable)):
            X = _LGBMCheckArray(X, accept_sparse=True, force_all_finite=False)
        n_features = X.shape[1]
        if self._n_features != n_features:
            raise ValueError(
                "Number of features of the model must "
                f"match the input. Model n_features_ is {self._n_features} and "
                f"input n_features is {n_features}"
            )
        # retrieve original params that possibly can be used in both training and prediction
        # and then overwrite them (considering aliases) with params that were passed directly in prediction
        predict_params = self._process_params(stage="predict")
        for alias in _ConfigAliases.get_by_alias(
            "data",
            "X",
            "raw_score",
            "start_iteration",
            "num_iteration",
            "pred_leaf",
            "pred_contrib",
            *kwargs.keys(),
        ):
            predict_params.pop(alias, None)
        predict_params.update(kwargs)

        # number of threads can have values with special meaning which is only applied
        # in the scikit-learn interface, these should not reach the c++ side as-is
        predict_params = _choose_param_value("num_threads", predict_params, self.n_jobs)
        predict_params["num_threads"] = self._process_n_jobs(predict_params["num_threads"])

        return self._Booster.predict(  # type: ignore[union-attr]
            X,
            raw_score=raw_score,
            start_iteration=start_iteration,
            num_iteration=num_iteration,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            validate_features=validate_features,
            **predict_params,
        )

    predict.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted value for each sample.",
        X_shape="numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse, list of lists of int or float of shape = [n_samples, n_features]",
        output_name="predicted_result",
        predicted_result_shape="array-like of shape = [n_samples] or shape = [n_samples, n_classes]",
        X_leaves_shape="array-like of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]",
        X_SHAP_values_shape="array-like of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes] or list with n_classes length of such objects",
    )
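
    # Example (editor's sketch): for a fitted model on data with ``n_features``
    # columns, assuming ``X_test`` is prepared by the caller,
    #
    #     y_hat = model.predict(X_test)                        # shape = [n_samples]
    #     contribs = model.predict(X_test, pred_contrib=True)  # SHAP-style values
    #
    # For a non-multiclass model ``contribs`` has shape [n_samples, n_features + 1];
    # per the note in the docstring above, the last column is the expected value.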

    @property
    def n_features_(self) -> int:
        """:obj:`int`: The number of features of fitted model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No n_features found. Need to call fit beforehand.")
        return self._n_features

    @property
    def n_features_in_(self) -> int:
        """:obj:`int`: The number of features of fitted model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No n_features_in found. Need to call fit beforehand.")
        return self._n_features_in

    @property
    def best_score_(self) -> _LGBM_BoosterBestScoreType:
        """:obj:`dict`: The best score of fitted model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No best_score found. Need to call fit beforehand.")
        return self._best_score

    @property
    def best_iteration_(self) -> int:
        """:obj:`int`: The best iteration of fitted model if ``early_stopping()`` callback has been specified."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError(
                "No best_iteration found. Need to call fit with early_stopping callback beforehand."
            )
        return self._best_iteration

    @property
    def objective_(self) -> Union[str, _LGBM_ScikitCustomObjectiveFunction]:
        """:obj:`str` or :obj:`callable`: The concrete objective used while fitting this model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No objective found. Need to call fit beforehand.")
        return self._objective  # type: ignore[return-value]

    @property
    def n_estimators_(self) -> int:
        """:obj:`int`: True number of boosting iterations performed.

        This might be less than parameter ``n_estimators`` if early stopping was enabled or
        if boosting stopped early due to limits on complexity like ``min_gain_to_split``.

        .. versionadded:: 4.0.0
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No n_estimators found. Need to call fit beforehand.")
        return self._Booster.current_iteration()  # type: ignore

    @property
    def n_iter_(self) -> int:
        """:obj:`int`: True number of boosting iterations performed.

        This might be less than parameter ``n_estimators`` if early stopping was enabled or
        if boosting stopped early due to limits on complexity like ``min_gain_to_split``.

        .. versionadded:: 4.0.0
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No n_iter found. Need to call fit beforehand.")
        return self._Booster.current_iteration()  # type: ignore

    @property
    def booster_(self) -> Booster:
        """Booster: The underlying Booster of this model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No booster found. Need to call fit beforehand.")
        return self._Booster  # type: ignore[return-value]

    @property
    def evals_result_(self) -> _EvalResultDict:
        """:obj:`dict`: The evaluation results if validation sets have been specified."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No results found. Need to call fit with eval_set beforehand.")
        return self._evals_result

    @property
    def feature_importances_(self) -> np.ndarray:
        """:obj:`array` of shape = [n_features]: The feature importances (the higher, the more important).

        .. note::

            ``importance_type`` attribute is passed to the function
            to configure the type of importance values to be extracted.
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No feature_importances found. Need to call fit beforehand.")
        return self._Booster.feature_importance(importance_type=self.importance_type)  # type: ignore[union-attr]
wxchan's avatar
wxchan committed
1114
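
    # Illustrative only, not in the original source: ``importance_type`` is a
    # constructor parameter, so the same data can yield split counts or total
    # gain depending on configuration. Placeholder names:
    #
    #   from lightgbm import LGBMRegressor
    #   model = LGBMRegressor(importance_type="gain").fit(X_train, y_train)
    #   model.feature_importances_  # total gain per feature instead of split counts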

    @property
    def feature_name_(self) -> List[str]:
        """:obj:`list` of shape = [n_features]: The names of features."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No feature_name found. Need to call fit beforehand.")
        return self._Booster.feature_name()  # type: ignore[union-attr]


class LGBMRegressor(_LGBMRegressorBase, LGBMModel):
    """LightGBM regressor."""

    def fit(  # type: ignore[override]
        self,
        X: _LGBM_ScikitMatrixLike,
        y: _LGBM_LabelType,
        sample_weight: Optional[_LGBM_WeightType] = None,
        init_score: Optional[_LGBM_InitScoreType] = None,
        eval_set: Optional[List[_LGBM_ScikitValidSet]] = None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight: Optional[List[_LGBM_WeightType]] = None,
        eval_init_score: Optional[List[_LGBM_InitScoreType]] = None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        feature_name: _LGBM_FeatureNameConfiguration = "auto",
        categorical_feature: _LGBM_CategoricalFeatureConfiguration = "auto",
        callbacks: Optional[List[Callable]] = None,
        init_model: Optional[Union[str, Path, Booster, LGBMModel]] = None,
    ) -> "LGBMRegressor":
        """Docstring is inherited from the LGBMModel."""
        super().fit(
            X,
            y,
            sample_weight=sample_weight,
            init_score=init_score,
            eval_set=eval_set,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_init_score=eval_init_score,
            eval_metric=eval_metric,
            feature_name=feature_name,
            categorical_feature=categorical_feature,
            callbacks=callbacks,
            init_model=init_model,
        )
        return self

    _base_doc = LGBMModel.fit.__doc__.replace("self : LGBMModel", "self : LGBMRegressor")  # type: ignore
    _base_doc = (
        _base_doc[: _base_doc.find("group :")]  # type: ignore
        + _base_doc[_base_doc.find("eval_set :") :]
    )  # type: ignore
    _base_doc = _base_doc[: _base_doc.find("eval_class_weight :")] + _base_doc[_base_doc.find("eval_init_score :") :]
    fit.__doc__ = _base_doc[: _base_doc.find("eval_group :")] + _base_doc[_base_doc.find("eval_metric :") :]
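
    # A hedged usage sketch, not part of the original module; data names are
    # placeholders:
    #
    #   from lightgbm import LGBMRegressor
    #   reg = LGBMRegressor(n_estimators=100, learning_rate=0.1)
    #   reg.fit(X_train, y_train, eval_set=[(X_valid, y_valid)], eval_metric="l2")
    #   preds = reg.predict(X_valid)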


class LGBMClassifier(_LGBMClassifierBase, LGBMModel):
    """LightGBM classifier."""

    def fit(  # type: ignore[override]
        self,
        X: _LGBM_ScikitMatrixLike,
        y: _LGBM_LabelType,
        sample_weight: Optional[_LGBM_WeightType] = None,
        init_score: Optional[_LGBM_InitScoreType] = None,
        eval_set: Optional[List[_LGBM_ScikitValidSet]] = None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight: Optional[List[_LGBM_WeightType]] = None,
        eval_class_weight: Optional[List[float]] = None,
        eval_init_score: Optional[List[_LGBM_InitScoreType]] = None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        feature_name: _LGBM_FeatureNameConfiguration = "auto",
        categorical_feature: _LGBM_CategoricalFeatureConfiguration = "auto",
        callbacks: Optional[List[Callable]] = None,
        init_model: Optional[Union[str, Path, Booster, LGBMModel]] = None,
    ) -> "LGBMClassifier":
        """Docstring is inherited from the LGBMModel."""
        _LGBMAssertAllFinite(y)
        _LGBMCheckClassificationTargets(y)
        self._le = _LGBMLabelEncoder().fit(y)
        _y = self._le.transform(y)
        self._class_map = dict(zip(self._le.classes_, self._le.transform(self._le.classes_)))
        if isinstance(self.class_weight, dict):
            self._class_weight = {self._class_map[k]: v for k, v in self.class_weight.items()}

        self._classes = self._le.classes_
        self._n_classes = len(self._classes)  # type: ignore[arg-type]

        if self.objective is None:
            self._objective = None

        # adjust eval metrics to match whether binary or multiclass
        # classification is being performed
        if not callable(eval_metric):
            if isinstance(eval_metric, list):
                eval_metric_list = eval_metric
            elif isinstance(eval_metric, str):
                eval_metric_list = [eval_metric]
            else:
                eval_metric_list = []
            if self._n_classes > 2:
                for index, metric in enumerate(eval_metric_list):
                    if metric in {"logloss", "binary_logloss"}:
                        eval_metric_list[index] = "multi_logloss"
                    elif metric in {"error", "binary_error"}:
                        eval_metric_list[index] = "multi_error"
            else:
                for index, metric in enumerate(eval_metric_list):
                    if metric in {"logloss", "multi_logloss"}:
                        eval_metric_list[index] = "binary_logloss"
                    elif metric in {"error", "multi_error"}:
                        eval_metric_list[index] = "binary_error"
            eval_metric = eval_metric_list
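            # For example (illustrative, not in the original source): with three
            # classes, eval_metric="logloss" is trained as "multi_logloss", while
            # with two classes eval_metric=["multi_error"] becomes ["binary_error"].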

        # do not modify args, as it causes errors in model selection tools
        valid_sets: Optional[List[_LGBM_ScikitValidSet]] = None
        if eval_set is not None:
            if isinstance(eval_set, tuple):
                eval_set = [eval_set]
            valid_sets = []
            for valid_x, valid_y in eval_set:
                if valid_x is X and valid_y is y:
                    valid_sets.append((valid_x, _y))
                else:
                    valid_sets.append((valid_x, self._le.transform(valid_y)))

        super().fit(
            X,
            _y,
            sample_weight=sample_weight,
            init_score=init_score,
            eval_set=valid_sets,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_class_weight=eval_class_weight,
            eval_init_score=eval_init_score,
            eval_metric=eval_metric,
            feature_name=feature_name,
            categorical_feature=categorical_feature,
            callbacks=callbacks,
            init_model=init_model,
        )
        return self

    _base_doc = LGBMModel.fit.__doc__.replace("self : LGBMModel", "self : LGBMClassifier")  # type: ignore
    _base_doc = (
        _base_doc[: _base_doc.find("group :")]  # type: ignore
        + _base_doc[_base_doc.find("eval_set :") :]
    )  # type: ignore
    fit.__doc__ = _base_doc[: _base_doc.find("eval_group :")] + _base_doc[_base_doc.find("eval_metric :") :]
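
    # A hedged usage sketch, not part of the original module: labels may be
    # arbitrary values (e.g. strings); they are label-encoded internally and
    # decoded back by ``predict``. Placeholder names:
    #
    #   from lightgbm import LGBMClassifier
    #   clf = LGBMClassifier()
    #   clf.fit(X_train, y_train)  # y_train can be e.g. ["cat", "dog", ...]
    #   clf.predict(X_test)        # returns the original labels, not indices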

    def predict(
        self,
        X: _LGBM_ScikitMatrixLike,
        raw_score: bool = False,
        start_iteration: int = 0,
        num_iteration: Optional[int] = None,
        pred_leaf: bool = False,
        pred_contrib: bool = False,
        validate_features: bool = False,
        **kwargs: Any,
    ):
        """Docstring is inherited from the LGBMModel."""
        result = self.predict_proba(
            X=X,
            raw_score=raw_score,
            start_iteration=start_iteration,
            num_iteration=num_iteration,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            validate_features=validate_features,
            **kwargs,
        )
        if callable(self._objective) or raw_score or pred_leaf or pred_contrib:
            return result
        else:
            class_index = np.argmax(result, axis=1)
            return self._le.inverse_transform(class_index)

    predict.__doc__ = LGBMModel.predict.__doc__

    def predict_proba(
        self,
        X: _LGBM_ScikitMatrixLike,
        raw_score: bool = False,
        start_iteration: int = 0,
        num_iteration: Optional[int] = None,
        pred_leaf: bool = False,
        pred_contrib: bool = False,
        validate_features: bool = False,
        **kwargs: Any,
    ):
        """Docstring is set after definition, using a template."""
        result = super().predict(
            X=X,
            raw_score=raw_score,
            start_iteration=start_iteration,
            num_iteration=num_iteration,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            validate_features=validate_features,
            **kwargs,
        )
        if callable(self._objective) and not (raw_score or pred_leaf or pred_contrib):
            _log_warning(
                "Cannot compute class probabilities or labels "
                "due to the usage of a customized objective function.\n"
                "Returning raw scores instead."
            )
            return result
        elif self._n_classes > 2 or raw_score or pred_leaf or pred_contrib:  # type: ignore [operator]
            return result
        else:
            return np.vstack((1.0 - result, result)).transpose()

    predict_proba.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted probability for each class for each sample.",
        X_shape="numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse, list of lists of int or float of shape = [n_samples, n_features]",
        output_name="predicted_probability",
        predicted_result_shape="array-like of shape = [n_samples] or shape = [n_samples, n_classes]",
        X_leaves_shape="array-like of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]",
        X_SHAP_values_shape="array-like of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes] or list with n_classes length of such objects",
    )
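
    # Shape note (illustrative, not in the original source): for binary tasks the
    # underlying prediction is a 1-D positive-class score that ``predict_proba``
    # expands into two columns, so the result is always [n_samples, n_classes]:
    #
    #   proba = clf.predict_proba(X_test)  # assumes a fitted ``clf``
    #   proba.shape                        # (n_samples, n_classes); rows sum to 1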

    @property
    def classes_(self) -> np.ndarray:
        """:obj:`array` of shape = [n_classes]: The class label array."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No classes found. Need to call fit beforehand.")
        return self._classes  # type: ignore[return-value]

    @property
    def n_classes_(self) -> int:
        """:obj:`int`: The number of classes."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No classes found. Need to call fit beforehand.")
        return self._n_classes


class LGBMRanker(LGBMModel):
    """LightGBM ranker.

    .. warning::

        scikit-learn doesn't support ranking applications yet,
        therefore this class is not really compatible with the sklearn ecosystem.
        Please use this class mainly for training and applying ranking models in the common scikit-learn-like way.
    """

    def fit(  # type: ignore[override]
        self,
        X: _LGBM_ScikitMatrixLike,
        y: _LGBM_LabelType,
        sample_weight: Optional[_LGBM_WeightType] = None,
        init_score: Optional[_LGBM_InitScoreType] = None,
        group: Optional[_LGBM_GroupType] = None,
        eval_set: Optional[List[_LGBM_ScikitValidSet]] = None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight: Optional[List[_LGBM_WeightType]] = None,
        eval_init_score: Optional[List[_LGBM_InitScoreType]] = None,
        eval_group: Optional[List[_LGBM_GroupType]] = None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        eval_at: Union[List[int], Tuple[int, ...]] = (1, 2, 3, 4, 5),
        feature_name: _LGBM_FeatureNameConfiguration = "auto",
        categorical_feature: _LGBM_CategoricalFeatureConfiguration = "auto",
        callbacks: Optional[List[Callable]] = None,
        init_model: Optional[Union[str, Path, Booster, LGBMModel]] = None,
    ) -> "LGBMRanker":
        """Docstring is inherited from the LGBMModel."""
        # check group data
        if group is None:
            raise ValueError("Should set group for ranking task")

        if eval_set is not None:
            if eval_group is None:
                raise ValueError("Eval_group cannot be None when eval_set is not None")
            elif len(eval_group) != len(eval_set):
                raise ValueError("Length of eval_group should be equal to length of eval_set")
            elif (
                isinstance(eval_group, dict)
                and any(i not in eval_group or eval_group[i] is None for i in range(len(eval_group)))
                or isinstance(eval_group, list)
                and any(group is None for group in eval_group)
            ):
                raise ValueError(
                    "Should set group for all eval datasets for ranking task; "
                    "if you use dict, the index should start from 0"
                )

        self._eval_at = eval_at
        super().fit(
            X,
            y,
            sample_weight=sample_weight,
            init_score=init_score,
            group=group,
            eval_set=eval_set,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_init_score=eval_init_score,
            eval_group=eval_group,
            eval_metric=eval_metric,
            feature_name=feature_name,
            categorical_feature=categorical_feature,
            callbacks=callbacks,
            init_model=init_model,
        )
        return self

    _base_doc = LGBMModel.fit.__doc__.replace("self : LGBMModel", "self : LGBMRanker")  # type: ignore
    fit.__doc__ = (
        _base_doc[: _base_doc.find("eval_class_weight :")]  # type: ignore
        + _base_doc[_base_doc.find("eval_init_score :") :]
    )  # type: ignore
    _base_doc = fit.__doc__
    _before_feature_name, _feature_name, _after_feature_name = _base_doc.partition("feature_name :")
    fit.__doc__ = f"""{_before_feature_name}eval_at : list or tuple of int, optional (default=(1, 2, 3, 4, 5))
        The evaluation positions of the specified metric.
    {_feature_name}{_after_feature_name}"""
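
# A hedged usage sketch, not part of the original module: ``group`` holds the
# size of each query, e.g. group=[10, 20] marks the first 10 rows as one query
# and the next 20 as another; ``eval_group`` does the same for each entry of
# ``eval_set``. Data names are placeholders:
#
#   from lightgbm import LGBMRanker
#   ranker = LGBMRanker(n_estimators=50)
#   ranker.fit(X_train, y_train, group=[10, 20],
#              eval_set=[(X_valid, y_valid)], eval_group=[[15, 5]],
#              eval_at=(1, 3))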