# coding: utf-8
"""Scikit-learn wrapper interface for LightGBM."""

import copy
from inspect import signature
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union

import numpy as np
import scipy.sparse

from .basic import (
    _MULTICLASS_OBJECTIVES,
    Booster,
    Dataset,
    LightGBMError,
    _choose_param_value,
    _ConfigAliases,
    _LGBM_BoosterBestScoreType,
    _LGBM_CategoricalFeatureConfiguration,
    _LGBM_EvalFunctionResultType,
    _LGBM_FeatureNameConfiguration,
    _LGBM_GroupType,
    _LGBM_InitScoreType,
    _LGBM_LabelType,
    _LGBM_WeightType,
    _log_warning,
)
from .callback import _EvalResultDict, record_evaluation
from .compat import (
    SKLEARN_INSTALLED,
    LGBMNotFittedError,
    _LGBMAssertAllFinite,
    _LGBMCheckClassificationTargets,
    _LGBMCheckSampleWeight,
    _LGBMClassifierBase,
    _LGBMComputeSampleWeight,
    _LGBMCpuCount,
    _LGBMLabelEncoder,
    _LGBMModelBase,
    _LGBMRegressorBase,
    _LGBMValidateData,
    _sklearn_version,
    pd_DataFrame,
)
from .engine import train
if TYPE_CHECKING:
    from .compat import _sklearn_Tags


__all__ = [
    "LGBMClassifier",
    "LGBMModel",
    "LGBMRanker",
    "LGBMRegressor",
]

_LGBM_ScikitMatrixLike = Union[
    List[Union[List[float], List[int]]],
    np.ndarray,
    pd_DataFrame,
    scipy.sparse.spmatrix,
]
_LGBM_ScikitCustomObjectiveFunction = Union[
    # f(labels, preds)
    Callable[
        [Optional[np.ndarray], np.ndarray],
        Tuple[np.ndarray, np.ndarray],
    ],
    # f(labels, preds, weights)
    Callable[
        [Optional[np.ndarray], np.ndarray, Optional[np.ndarray]],
        Tuple[np.ndarray, np.ndarray],
    ],
    # f(labels, preds, weights, group)
    Callable[
        [Optional[np.ndarray], np.ndarray, Optional[np.ndarray], Optional[np.ndarray]],
        Tuple[np.ndarray, np.ndarray],
    ],
]
_LGBM_ScikitCustomEvalFunction = Union[
    # f(labels, preds)
    Callable[
        [Optional[np.ndarray], np.ndarray],
        _LGBM_EvalFunctionResultType,
    ],
    Callable[
        [Optional[np.ndarray], np.ndarray],
        List[_LGBM_EvalFunctionResultType],
    ],
    # f(labels, preds, weights)
    Callable[
        [Optional[np.ndarray], np.ndarray, Optional[np.ndarray]],
        _LGBM_EvalFunctionResultType,
    ],
    Callable[
        [Optional[np.ndarray], np.ndarray, Optional[np.ndarray]],
        List[_LGBM_EvalFunctionResultType],
    ],
    # f(labels, preds, weights, group)
    Callable[
        [Optional[np.ndarray], np.ndarray, Optional[np.ndarray], Optional[np.ndarray]],
        _LGBM_EvalFunctionResultType,
    ],
    Callable[
        [Optional[np.ndarray], np.ndarray, Optional[np.ndarray], Optional[np.ndarray]],
        List[_LGBM_EvalFunctionResultType],
    ],
]
_LGBM_ScikitEvalMetricType = Union[
    str,
    _LGBM_ScikitCustomEvalFunction,
    List[Union[str, _LGBM_ScikitCustomEvalFunction]],
]
_LGBM_ScikitValidSet = Tuple[_LGBM_ScikitMatrixLike, _LGBM_LabelType]

def _get_group_from_constructed_dataset(dataset: Dataset) -> Optional[np.ndarray]:
    group = dataset.get_group()
    error_msg = (
        "Estimators in lightgbm.sklearn should only retrieve query groups from a constructed Dataset. "
        "If you're seeing this message, it's a bug in lightgbm. Please report it at https://github.com/microsoft/LightGBM/issues."
    )
    assert group is None or isinstance(group, np.ndarray), error_msg
    return group


def _get_label_from_constructed_dataset(dataset: Dataset) -> np.ndarray:
    label = dataset.get_label()
    error_msg = (
        "Estimators in lightgbm.sklearn should only retrieve labels from a constructed Dataset. "
        "If you're seeing this message, it's a bug in lightgbm. Please report it at https://github.com/microsoft/LightGBM/issues."
    )
    assert isinstance(label, np.ndarray), error_msg
    return label


def _get_weight_from_constructed_dataset(dataset: Dataset) -> Optional[np.ndarray]:
    weight = dataset.get_weight()
    error_msg = (
        "Estimators in lightgbm.sklearn should only retrieve weights from a constructed Dataset. "
        "If you're seeing this message, it's a bug in lightgbm. Please report it at https://github.com/microsoft/LightGBM/issues."
    )
    assert weight is None or isinstance(weight, np.ndarray), error_msg
    return weight


class _ObjectiveFunctionWrapper:
    """Proxy class for objective function."""

    def __init__(self, func: _LGBM_ScikitCustomObjectiveFunction):
        """Construct a proxy class.

        This class transforms an objective function into the ``new_func(preds, dataset)``
        signature expected by ``lightgbm.engine.train``.

        Parameters
        ----------
        func : callable
            Expects a callable with the following signatures:
            ``func(y_true, y_pred)``,
            ``func(y_true, y_pred, weight)``
            or ``func(y_true, y_pred, weight, group)``
            and returns (grad, hess):

                y_true : numpy 1-D array of shape = [n_samples]
                    The target values.
                y_pred : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                    The predicted values.
                    Predicted values are returned before any transformation,
                    e.g. they are raw margin instead of probability of positive class for binary task.
                weight : numpy 1-D array of shape = [n_samples]
                    The weight of samples. Weights should be non-negative.
                group : numpy 1-D array
                    Group/query data.
                    Only used in the learning-to-rank task.
                    sum(group) = n_samples.
                    For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
                    where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
                grad : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                    The value of the first order derivative (gradient) of the loss
                    with respect to the elements of y_pred for each sample point.
                hess : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                    The value of the second order derivative (Hessian) of the loss
                    with respect to the elements of y_pred for each sample point.

        .. note::

            For multi-class task, y_pred is a numpy 2-D array of shape = [n_samples, n_classes],
            and grad and hess should be returned in the same format.
        """
        self.func = func
    def __call__(
        self,
        preds: np.ndarray,
        dataset: Dataset,
    ) -> Tuple[np.ndarray, np.ndarray]:
        """Call passed function with appropriate arguments.

        Parameters
        ----------
        preds : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The predicted values.
        dataset : Dataset
            The training dataset.

        Returns
        -------
        grad : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The value of the first order derivative (gradient) of the loss
            with respect to the elements of preds for each sample point.
        hess : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The value of the second order derivative (Hessian) of the loss
            with respect to the elements of preds for each sample point.
        """
        labels = _get_label_from_constructed_dataset(dataset)
        argc = len(signature(self.func).parameters)
        if argc == 2:
            grad, hess = self.func(labels, preds)  # type: ignore[call-arg]
            return grad, hess

        weight = _get_weight_from_constructed_dataset(dataset)
        if argc == 3:
            grad, hess = self.func(labels, preds, weight)  # type: ignore[call-arg]
            return grad, hess

        if argc == 4:
            group = _get_group_from_constructed_dataset(dataset)
            return self.func(labels, preds, weight, group)  # type: ignore[call-arg]

        raise TypeError(f"Self-defined objective function should have 2, 3 or 4 arguments, got {argc}")
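
# A minimal sketch (not part of LightGBM's public API) of a custom objective in
# the two-argument form documented above: least-squares loss, for which
# grad = y_pred - y_true and hess = 1 for every sample.
def _example_l2_objective(y_true: np.ndarray, y_pred: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    grad = y_pred - y_true
    hess = np.ones_like(y_pred)
    return grad, hess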


class _EvalFunctionWrapper:
    """Proxy class for evaluation function."""

    def __init__(self, func: _LGBM_ScikitCustomEvalFunction):
        """Construct a proxy class.

        This class transforms an evaluation function into the ``new_func(preds, dataset)``
        signature expected by ``lightgbm.engine.train``.

        Parameters
        ----------
        func : callable
            Expects a callable with the following signatures:
            ``func(y_true, y_pred)``,
            ``func(y_true, y_pred, weight)``
            or ``func(y_true, y_pred, weight, group)``
            and returns (eval_name, eval_result, is_higher_better) or
            list of (eval_name, eval_result, is_higher_better):

                y_true : numpy 1-D array of shape = [n_samples]
                    The target values.
                y_pred : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                    The predicted values.
                    In case of custom ``objective``, predicted values are returned before any transformation,
                    e.g. they are raw margin instead of probability of positive class for binary task in this case.
                weight : numpy 1-D array of shape = [n_samples]
                    The weight of samples. Weights should be non-negative.
                group : numpy 1-D array
                    Group/query data.
                    Only used in the learning-to-rank task.
                    sum(group) = n_samples.
                    For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
                    where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
                eval_name : str
                    The name of the evaluation function (without whitespace).
                eval_result : float
                    The eval result.
                is_higher_better : bool
                    Whether a higher value of eval_result is better, e.g. AUC is ``is_higher_better``.
        """
        self.func = func

    def __call__(
        self,
        preds: np.ndarray,
        dataset: Dataset,
    ) -> Union[_LGBM_EvalFunctionResultType, List[_LGBM_EvalFunctionResultType]]:
        """Call passed function with appropriate arguments.

        Parameters
        ----------
        preds : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The predicted values.
        dataset : Dataset
            The training dataset.

        Returns
        -------
        eval_name : str
            The name of the evaluation function (without whitespace).
        eval_result : float
            The eval result.
        is_higher_better : bool
            Whether a higher value of eval_result is better, e.g. AUC is ``is_higher_better``.
        """
        labels = _get_label_from_constructed_dataset(dataset)
        argc = len(signature(self.func).parameters)
        if argc == 2:
            return self.func(labels, preds)  # type: ignore[call-arg]

        weight = _get_weight_from_constructed_dataset(dataset)
        if argc == 3:
            return self.func(labels, preds, weight)  # type: ignore[call-arg]

        if argc == 4:
            group = _get_group_from_constructed_dataset(dataset)
            return self.func(labels, preds, weight, group)  # type: ignore[call-arg]

        raise TypeError(f"Self-defined eval function should have 2, 3 or 4 arguments, got {argc}")
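
# A minimal sketch (not part of LightGBM's public API) of a custom eval function
# in the two-argument form documented above: RMSE, returned as a single
# (eval_name, eval_result, is_higher_better) tuple.
def _example_rmse_metric(y_true: np.ndarray, y_pred: np.ndarray) -> _LGBM_EvalFunctionResultType:
    return "rmse", float(np.sqrt(np.mean((y_pred - y_true) ** 2))), False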


# documentation templates for LGBMModel methods are shared between the classes in
# this module and those in the ``dask`` module

_lgbmmodel_doc_fit = """
    Build a gradient boosting model from the training set (X, y).

    Parameters
    ----------
    X : {X_shape}
        Input feature matrix.
    y : {y_shape}
        The target values (class labels in classification, real numbers in regression).
    sample_weight : {sample_weight_shape}
        Weights of training data. Weights should be non-negative.
    init_score : {init_score_shape}
        Init score of training data.
    group : {group_shape}
        Group/query data.
        Only used in the learning-to-rank task.
        sum(group) = n_samples.
        For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
        where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
    eval_set : list or None, optional (default=None)
        A list of (X, y) tuple pairs to use as validation sets.
    eval_names : list of str, or None, optional (default=None)
        Names of eval_set.
    eval_sample_weight : {eval_sample_weight_shape}
        Weights of eval data. Weights should be non-negative.
    eval_class_weight : list or None, optional (default=None)
        Class weights of eval data.
    eval_init_score : {eval_init_score_shape}
        Init score of eval data.
    eval_group : {eval_group_shape}
        Group data of eval data.
    eval_metric : str, callable, list or None, optional (default=None)
        If str, it should be a built-in evaluation metric to use.
        If callable, it should be a custom evaluation metric, see note below for more details.
        If list, it can be a list of built-in metrics, a list of custom evaluation metrics, or a mix of both.
        In either case, the ``metric`` from the model parameters will be evaluated and used as well.
        Default: 'l2' for LGBMRegressor, 'logloss' for LGBMClassifier, 'ndcg' for LGBMRanker.
    feature_name : list of str, or 'auto', optional (default='auto')
        Feature names.
        If 'auto' and data is pandas DataFrame, data column names are used.
    categorical_feature : list of str or int, or 'auto', optional (default='auto')
        Categorical features.
        If list of int, interpreted as indices.
        If list of str, interpreted as feature names (need to specify ``feature_name`` as well).
        If 'auto' and data is pandas DataFrame, pandas unordered categorical columns are used.
        All values in categorical features will be cast to int32 and thus should be less than int32 max value (2147483647).
        Large values could be memory consuming. Consider using consecutive integers starting from zero.
        All negative values in categorical features will be treated as missing values.
        The output cannot be monotonically constrained with respect to a categorical feature.
        Floating point numbers in categorical features will be rounded towards 0.
    callbacks : list of callable, or None, optional (default=None)
        List of callback functions that are applied at each iteration.
        See Callbacks in Python API for more information.
    init_model : str, pathlib.Path, Booster, LGBMModel or None, optional (default=None)
        Filename of LightGBM model, Booster instance or LGBMModel instance used to continue training.

    Returns
    -------
    self : LGBMModel
        Returns self.
    """

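# Sketch of the ``group`` encoding described in the fit template above: a ranking
# dataset with three queries of sizes 3, 2, and 5 (10 rows in total) would pass
# group = [3, 2, 5]; rows 1-3 form the first query, rows 4-5 the second, and
# rows 6-10 the third.
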
_lgbmmodel_doc_custom_eval_note = """
    Note
    ----
    Custom eval function expects a callable with the following signatures:
    ``func(y_true, y_pred)``, ``func(y_true, y_pred, weight)`` or
    ``func(y_true, y_pred, weight, group)``
    and returns (eval_name, eval_result, is_higher_better) or
    list of (eval_name, eval_result, is_higher_better):

        y_true : numpy 1-D array of shape = [n_samples]
            The target values.
        y_pred : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The predicted values.
            In case of custom ``objective``, predicted values are returned before any transformation,
            e.g. they are raw margin instead of probability of positive class for binary task in this case.
        weight : numpy 1-D array of shape = [n_samples]
            The weight of samples. Weights should be non-negative.
        group : numpy 1-D array
            Group/query data.
            Only used in the learning-to-rank task.
            sum(group) = n_samples.
            For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
            where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
        eval_name : str
            The name of the evaluation function (without whitespace).
        eval_result : float
            The eval result.
        is_higher_better : bool
            Whether a higher value of eval_result is better, e.g. AUC is ``is_higher_better``.
"""

_lgbmmodel_doc_predict = """
    {description}

    Parameters
    ----------
    X : {X_shape}
        Input feature matrix.
    raw_score : bool, optional (default=False)
        Whether to predict raw scores.
    start_iteration : int, optional (default=0)
        Start index of the iteration to predict.
        If <= 0, starts from the first iteration.
    num_iteration : int or None, optional (default=None)
        Total number of iterations used in the prediction.
        If None, if the best iteration exists and start_iteration <= 0, the best iteration is used;
        otherwise, all iterations from ``start_iteration`` are used (no limits).
        If <= 0, all iterations from ``start_iteration`` are used (no limits).
    pred_leaf : bool, optional (default=False)
        Whether to predict leaf index.
    pred_contrib : bool, optional (default=False)
        Whether to predict feature contributions.

        .. note::

            If you want to get more explanations for your model's predictions using SHAP values,
            like SHAP interaction values,
            you can install the shap package (https://github.com/slundberg/shap).
            Note that unlike the shap package, with ``pred_contrib`` we return a matrix with an extra
            column, where the last column is the expected value.

    validate_features : bool, optional (default=False)
        If True, ensure that the features used to predict match the ones used to train.
        Used only if data is pandas DataFrame.
    **kwargs
        Other parameters for the prediction.

    Returns
    -------
    {output_name} : {predicted_result_shape}
        The predicted values.
    X_leaves : {X_leaves_shape}
        If ``pred_leaf=True``, the predicted leaf of every tree for each sample.
    X_SHAP_values : {X_SHAP_values_shape}
        If ``pred_contrib=True``, the feature contributions for each sample.
    """


def _extract_evaluation_meta_data(
    *,
    collection: Optional[Union[Dict[Any, Any], List[Any]]],
    name: str,
    i: int,
) -> Optional[Any]:
    """Try to extract the ith element of one of the ``eval_*`` inputs."""
    if collection is None:
        return None
    elif isinstance(collection, list):
        # It's possible, for example, to pass 3 eval sets through `eval_set`,
        # but only 1 init_score through `eval_init_score`.
        #
        # This if-else accounts for that possibility.
        if len(collection) > i:
            return collection[i]
        else:
            return None
    elif isinstance(collection, dict):
        return collection.get(i, None)
    else:
        raise TypeError(f"{name} should be dict or list")
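
# For example (a sketch of the behavior above): with three validation sets in
# ``eval_set`` but weights supplied only for the second one,
# ``eval_sample_weight={1: w}`` yields ``w`` for i=1 and None for i=0 and i=2,
# while the list form ``[w]`` yields ``w`` for i=0 and None for the rest.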


class LGBMModel(_LGBMModelBase):
    """Implementation of the scikit-learn API for LightGBM."""

    def __init__(
        self,
        *,
        boosting_type: str = "gbdt",
        num_leaves: int = 31,
        max_depth: int = -1,
        learning_rate: float = 0.1,
        n_estimators: int = 100,
        subsample_for_bin: int = 200000,
        objective: Optional[Union[str, _LGBM_ScikitCustomObjectiveFunction]] = None,
        class_weight: Optional[Union[Dict, str]] = None,
        min_split_gain: float = 0.0,
        min_child_weight: float = 1e-3,
        min_child_samples: int = 20,
        subsample: float = 1.0,
        subsample_freq: int = 0,
        colsample_bytree: float = 1.0,
        reg_alpha: float = 0.0,
        reg_lambda: float = 0.0,
        random_state: Optional[Union[int, np.random.RandomState, np.random.Generator]] = None,
        n_jobs: Optional[int] = None,
        importance_type: str = "split",
        **kwargs: Any,
    ):
        r"""Construct a gradient boosting model.

        Parameters
        ----------
        boosting_type : str, optional (default='gbdt')
            'gbdt', traditional Gradient Boosting Decision Tree.
            'dart', Dropouts meet Multiple Additive Regression Trees.
            'rf', Random Forest.
        num_leaves : int, optional (default=31)
            Maximum tree leaves for base learners.
        max_depth : int, optional (default=-1)
            Maximum tree depth for base learners, <=0 means no limit.
            If setting this to a positive value, consider also changing ``num_leaves`` to ``<= 2^max_depth``.
        learning_rate : float, optional (default=0.1)
            Boosting learning rate.
            You can use ``callbacks`` parameter of ``fit`` method to shrink/adapt learning rate
            in training using ``reset_parameter`` callback.
            Note that this will ignore the ``learning_rate`` argument in training.
        n_estimators : int, optional (default=100)
            Number of boosted trees to fit.
        subsample_for_bin : int, optional (default=200000)
            Number of samples for constructing bins.
        objective : str, callable or None, optional (default=None)
            Specify the learning task and the corresponding learning objective or
            a custom objective function to be used (see note below).
            Default: 'regression' for LGBMRegressor, 'binary' or 'multiclass' for LGBMClassifier, 'lambdarank' for LGBMRanker.
        class_weight : dict, 'balanced' or None, optional (default=None)
            Weights associated with classes in the form ``{class_label: weight}``.
            Use this parameter only for multi-class classification task;
            for binary classification task you may use ``is_unbalance`` or ``scale_pos_weight`` parameters.
            Note that the usage of all these parameters will result in poor estimates of the individual class probabilities.
            You may want to consider performing probability calibration
            (https://scikit-learn.org/stable/modules/calibration.html) of your model.
            The 'balanced' mode uses the values of y to automatically adjust weights
            inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))``.
            If None, all classes are supposed to have weight one.
            Note that these weights will be multiplied with ``sample_weight`` (passed through the ``fit`` method)
            if ``sample_weight`` is specified.
        min_split_gain : float, optional (default=0.)
            Minimum loss reduction required to make a further partition on a leaf node of the tree.
        min_child_weight : float, optional (default=1e-3)
            Minimum sum of instance weight (Hessian) needed in a child (leaf).
        min_child_samples : int, optional (default=20)
            Minimum number of data needed in a child (leaf).
        subsample : float, optional (default=1.)
            Subsample ratio of the training instance.
        subsample_freq : int, optional (default=0)
            Frequency of subsample, <=0 means disabled.
        colsample_bytree : float, optional (default=1.)
            Subsample ratio of columns when constructing each tree.
        reg_alpha : float, optional (default=0.)
            L1 regularization term on weights.
        reg_lambda : float, optional (default=0.)
            L2 regularization term on weights.
        random_state : int, RandomState object or None, optional (default=None)
            Random number seed.
            If int, this number is used to seed the C++ code.
            If RandomState or Generator object (numpy), a random integer is picked based on its state to seed the C++ code.
            If None, default seeds in C++ code are used.
        n_jobs : int or None, optional (default=None)
            Number of parallel threads to use for training (can be changed at prediction time by
            passing it as an extra keyword argument).

            For better performance, it is recommended to set this to the number of physical cores
            in the CPU.

            Negative integers are interpreted as following joblib's formula (n_cpus + 1 + n_jobs), just like
            scikit-learn (so e.g. -1 means using all threads). A value of zero corresponds to the default number of
            threads configured for OpenMP in the system. A value of ``None`` (the default) corresponds
            to using the number of physical cores in the system (its correct detection requires
            either the ``joblib`` or the ``psutil`` util libraries to be installed).

            .. versionchanged:: 4.0.0

        importance_type : str, optional (default='split')
            The type of feature importance to be filled into ``feature_importances_``.
            If 'split', result contains numbers of times the feature is used in a model.
            If 'gain', result contains total gains of splits which use the feature.
        **kwargs
            Other parameters for the model.
            Check http://lightgbm.readthedocs.io/en/latest/Parameters.html for more parameters.

            .. warning::

                \*\*kwargs is not supported in sklearn; it may cause unexpected issues.

        Note
        ----
        A custom objective function can be provided for the ``objective`` parameter.
        In this case, it should have the signature
        ``objective(y_true, y_pred) -> grad, hess``,
        ``objective(y_true, y_pred, weight) -> grad, hess``
        or ``objective(y_true, y_pred, weight, group) -> grad, hess``:

            y_true : numpy 1-D array of shape = [n_samples]
                The target values.
            y_pred : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                The predicted values.
                Predicted values are returned before any transformation,
                e.g. they are raw margin instead of probability of positive class for binary task.
            weight : numpy 1-D array of shape = [n_samples]
                The weight of samples. Weights should be non-negative.
            group : numpy 1-D array
                Group/query data.
                Only used in the learning-to-rank task.
                sum(group) = n_samples.
                For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
                where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
            grad : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                The value of the first order derivative (gradient) of the loss
                with respect to the elements of y_pred for each sample point.
            hess : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                The value of the second order derivative (Hessian) of the loss
                with respect to the elements of y_pred for each sample point.

        For multi-class task, y_pred is a numpy 2-D array of shape = [n_samples, n_classes],
        and grad and hess should be returned in the same format.
        """
        if not SKLEARN_INSTALLED:
            raise LightGBMError(
                "scikit-learn is required for lightgbm.sklearn. "
                "You must install scikit-learn and restart your session to use this module."
            )

        self.boosting_type = boosting_type
        self.objective = objective
        self.num_leaves = num_leaves
        self.max_depth = max_depth
        self.learning_rate = learning_rate
        self.n_estimators = n_estimators
        self.subsample_for_bin = subsample_for_bin
        self.min_split_gain = min_split_gain
        self.min_child_weight = min_child_weight
        self.min_child_samples = min_child_samples
        self.subsample = subsample
        self.subsample_freq = subsample_freq
        self.colsample_bytree = colsample_bytree
        self.reg_alpha = reg_alpha
        self.reg_lambda = reg_lambda
        self.random_state = random_state
        self.n_jobs = n_jobs
        self.importance_type = importance_type
        self._Booster: Optional[Booster] = None
        self._evals_result: _EvalResultDict = {}
        self._best_score: _LGBM_BoosterBestScoreType = {}
        self._best_iteration: int = -1
        self._other_params: Dict[str, Any] = {}
        self._objective = objective
        self.class_weight = class_weight
        self._class_weight: Optional[Union[Dict, str]] = None
        self._class_map: Optional[Dict[int, int]] = None
        self._n_features: int = -1
        self._n_features_in: int = -1
        self._classes: Optional[np.ndarray] = None
        self._n_classes: int = -1
        self.set_params(**kwargs)

    # scikit-learn 1.6 introduced an __sklearn_tags__() method intended to replace _more_tags().
    # _more_tags() can be removed whenever lightgbm's minimum supported scikit-learn version
    # is >=1.6.
    # ref: https://github.com/microsoft/LightGBM/pull/6651
    def _more_tags(self) -> Dict[str, Any]:
        check_sample_weight_str = (
            "In LightGBM, setting a sample's weight to 0 can produce a different result than omitting the sample. "
            "Such samples intentionally still affect count-based measures like 'min_data_in_leaf' "
            "(https://github.com/microsoft/LightGBM/issues/5626#issuecomment-1712706678) and the estimated distribution "
            "of features for Dataset construction (see https://github.com/microsoft/LightGBM/issues/5553)."
        )
        # "check_sample_weight_equivalence" can be removed when lightgbm's
        # minimum supported scikit-learn version is at least 1.6
        # ref: https://github.com/scikit-learn/scikit-learn/pull/30137
        return {
            "allow_nan": True,
            "X_types": ["2darray", "sparse", "1dlabels"],
            "_xfail_checks": {
                "check_no_attributes_set_in_init": "scikit-learn incorrectly asserts that private attributes "
                "cannot be set in __init__: "
                "(see https://github.com/microsoft/LightGBM/issues/2628)",
                "check_sample_weight_equivalence": check_sample_weight_str,
                "check_sample_weight_equivalence_on_dense_data": check_sample_weight_str,
                "check_sample_weight_equivalence_on_sparse_data": check_sample_weight_str,
            },
        }

    @staticmethod
    def _update_sklearn_tags_from_dict(
        *,
        tags: "_sklearn_Tags",
        tags_dict: Dict[str, Any],
    ) -> "_sklearn_Tags":
        """Update ``sklearn.utils.Tags`` inherited from ``scikit-learn`` base classes.

        ``scikit-learn`` 1.6 introduced a dataclass-based interface for estimator tags.
        ref: https://github.com/scikit-learn/scikit-learn/pull/29677

        This method handles updating that instance based on the value in ``self._more_tags()``.
        """
        tags.input_tags.allow_nan = tags_dict["allow_nan"]
        tags.input_tags.sparse = "sparse" in tags_dict["X_types"]
        tags.target_tags.one_d_labels = "1dlabels" in tags_dict["X_types"]
        return tags

    def __sklearn_tags__(self) -> Optional["_sklearn_Tags"]:
        # _LGBMModelBase.__sklearn_tags__() cannot be called unconditionally,
        # because that method isn't defined for scikit-learn<1.6
        if not hasattr(_LGBMModelBase, "__sklearn_tags__"):
            err_msg = (
                "__sklearn_tags__() should not be called when using scikit-learn<1.6. "
                f"Detected version: {_sklearn_version}"
            )
            raise AttributeError(err_msg)

        # take whatever tags are provided by BaseEstimator, then modify
        # them with LightGBM-specific values
        return self._update_sklearn_tags_from_dict(
            tags=super().__sklearn_tags__(),
            tags_dict=self._more_tags(),
        )

    def __sklearn_is_fitted__(self) -> bool:
        return getattr(self, "fitted_", False)

    def get_params(self, deep: bool = True) -> Dict[str, Any]:
        """Get parameters for this estimator.

        Parameters
        ----------
        deep : bool, optional (default=True)
            If True, will return the parameters for this estimator and
            contained subobjects that are estimators.

        Returns
        -------
        params : dict
            Parameter names mapped to their values.
        """
        # Based on: https://github.com/dmlc/xgboost/blob/bd92b1c9c0db3e75ec3dfa513e1435d518bb535d/python-package/xgboost/sklearn.py#L941
        # which was based on: https://stackoverflow.com/questions/59248211
        #
        # `get_params()` flows like this:
        #
        # 0. Get parameters in subclass (self.__class__) first, by using inspect.
        # 1. Get parameters in all parent classes (especially `LGBMModel`).
        # 2. Get whatever was passed via `**kwargs`.
        # 3. Merge them.
        #
        # This needs to accommodate being called recursively in the following
        # inheritance graphs (and similar for classification and ranking):
        #
        #   DaskLGBMRegressor -> LGBMRegressor     -> LGBMModel -> BaseEstimator
        #   (custom subclass) -> LGBMRegressor     -> LGBMModel -> BaseEstimator
        #                        LGBMRegressor     -> LGBMModel -> BaseEstimator
        #                        (custom subclass) -> LGBMModel -> BaseEstimator
        #                                             LGBMModel -> BaseEstimator
        #
        params = super().get_params(deep=deep)
        cp = copy.copy(self)
        # If the immediate parent defines get_params(), use that.
        if callable(getattr(cp.__class__.__bases__[0], "get_params", None)):
            cp.__class__ = cp.__class__.__bases__[0]
        # Otherwise, skip it and assume the next class will have it.
        # This is here primarily for cases where the first class in MRO is a scikit-learn mixin.
        else:
            cp.__class__ = cp.__class__.__bases__[1]
        params.update(cp.__class__.get_params(cp, deep))
        params.update(self._other_params)
        return params
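
    # For example (sketch): ``LGBMRegressor(num_leaves=63, verbose=-1).get_params()``
    # returns both the explicit constructor arguments and the ``verbose`` value
    # that arrived via ``**kwargs`` and was stored in ``self._other_params``.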

    def set_params(self, **params: Any) -> "LGBMModel":
        """Set the parameters of this estimator.

        Parameters
        ----------
        **params
            Parameter names with their new values.

        Returns
        -------
        self : object
            Returns self.
        """
        for key, value in params.items():
            setattr(self, key, value)
            if hasattr(self, f"_{key}"):
                setattr(self, f"_{key}", value)
            self._other_params[key] = value
        return self
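
    # Usage sketch: ``model.set_params(learning_rate=0.05, num_leaves=63)`` both
    # sets the public attributes and records the values in ``self._other_params``,
    # so they are forwarded to the Booster when fit() assembles its parameters.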

    def _process_params(self, stage: str) -> Dict[str, Any]:
        """Process the parameters of this estimator based on its type, parameter aliases, etc.

        Parameters
        ----------
        stage : str
            Name of the stage (can be ``fit`` or ``predict``) this method is called from.

        Returns
        -------
        processed_params : dict
            Processed parameter names mapped to their values.
        """
        assert stage in {"fit", "predict"}
        params = self.get_params()

        params.pop("objective", None)
        for alias in _ConfigAliases.get("objective"):
            if alias in params:
                obj = params.pop(alias)
                _log_warning(f"Found '{alias}' in params. Will use it instead of 'objective' argument")
                if stage == "fit":
                    self._objective = obj
        if stage == "fit":
            if self._objective is None:
                if isinstance(self, LGBMRegressor):
                    self._objective = "regression"
                elif isinstance(self, LGBMClassifier):
                    if self._n_classes > 2:
                        self._objective = "multiclass"
                    else:
                        self._objective = "binary"
                elif isinstance(self, LGBMRanker):
                    self._objective = "lambdarank"
                else:
                    raise ValueError("Unknown LGBMModel type.")
        if callable(self._objective):
            if stage == "fit":
                params["objective"] = _ObjectiveFunctionWrapper(self._objective)
            else:
                params["objective"] = "None"
        else:
            params["objective"] = self._objective

        params.pop("importance_type", None)
        params.pop("n_estimators", None)
        params.pop("class_weight", None)

        if isinstance(params["random_state"], np.random.RandomState):
            params["random_state"] = params["random_state"].randint(np.iinfo(np.int32).max)
        elif isinstance(params["random_state"], np.random.Generator):
            params["random_state"] = int(params["random_state"].integers(np.iinfo(np.int32).max))
        if self._n_classes > 2:
            for alias in _ConfigAliases.get("num_class"):
                params.pop(alias, None)
            params["num_class"] = self._n_classes
        if hasattr(self, "_eval_at"):
            eval_at = self._eval_at
            for alias in _ConfigAliases.get("eval_at"):
                if alias in params:
                    _log_warning(f"Found '{alias}' in params. Will use it instead of 'eval_at' argument")
                    eval_at = params.pop(alias)
            params["eval_at"] = eval_at

        # register default metric for consistency with callable eval_metric case
        original_metric = self._objective if isinstance(self._objective, str) else None
        if original_metric is None:
            # try to deduce from class instance
            if isinstance(self, LGBMRegressor):
                original_metric = "l2"
            elif isinstance(self, LGBMClassifier):
                original_metric = "multi_logloss" if self._n_classes > 2 else "binary_logloss"
            elif isinstance(self, LGBMRanker):
                original_metric = "ndcg"

        # overwrite default metric by explicitly set metric
        params = _choose_param_value("metric", params, original_metric)

        # use joblib conventions for negative n_jobs, just like scikit-learn
        # at predict time, this is handled later due to the order of parameter updates
        if stage == "fit":
            params = _choose_param_value("num_threads", params, self.n_jobs)
            params["num_threads"] = self._process_n_jobs(params["num_threads"])

        return params

    def _process_n_jobs(self, n_jobs: Optional[int]) -> int:
        """Convert special values of n_jobs to their actual values according to the formulas that apply.

        Parameters
        ----------
        n_jobs : int or None
            The original value of n_jobs, potentially having special values such as 'None' or
            negative integers.

        Returns
        -------
        n_jobs : int
            The value of n_jobs with special values converted to actual number of threads.
        """
        if n_jobs is None:
            n_jobs = _LGBMCpuCount(only_physical_cores=True)
        elif n_jobs < 0:
            n_jobs = max(_LGBMCpuCount(only_physical_cores=False) + 1 + n_jobs, 1)
        return n_jobs
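
    # For example (sketch): on a machine where joblib detects 8 logical cores,
    # n_jobs=-1 resolves to 8 threads (8 + 1 - 1) and n_jobs=-2 to 7, while
    # n_jobs=None resolves to the number of physical cores.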

    def fit(
        self,
        X: _LGBM_ScikitMatrixLike,
        y: _LGBM_LabelType,
        sample_weight: Optional[_LGBM_WeightType] = None,
        init_score: Optional[_LGBM_InitScoreType] = None,
        group: Optional[_LGBM_GroupType] = None,
        eval_set: Optional[List[_LGBM_ScikitValidSet]] = None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight: Optional[List[_LGBM_WeightType]] = None,
        eval_class_weight: Optional[List[float]] = None,
        eval_init_score: Optional[List[_LGBM_InitScoreType]] = None,
        eval_group: Optional[List[_LGBM_GroupType]] = None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        feature_name: _LGBM_FeatureNameConfiguration = "auto",
        categorical_feature: _LGBM_CategoricalFeatureConfiguration = "auto",
        callbacks: Optional[List[Callable]] = None,
        init_model: Optional[Union[str, Path, Booster, "LGBMModel"]] = None,
    ) -> "LGBMModel":
        """Docstring is set after definition, using a template."""
        params = self._process_params(stage="fit")

        # Do not modify original args in fit function
        # Refer to https://github.com/microsoft/LightGBM/pull/2619
        eval_metric_list: List[Union[str, _LGBM_ScikitCustomEvalFunction]]
        if eval_metric is None:
            eval_metric_list = []
        elif isinstance(eval_metric, list):
            eval_metric_list = copy.deepcopy(eval_metric)
        else:
            eval_metric_list = [copy.deepcopy(eval_metric)]

        # Separate built-in from callable evaluation metrics
        eval_metrics_callable = [_EvalFunctionWrapper(f) for f in eval_metric_list if callable(f)]
        eval_metrics_builtin = [m for m in eval_metric_list if isinstance(m, str)]

        # concatenate metric from params (or default if not provided in params) and eval_metric
        params["metric"] = [params["metric"]] if isinstance(params["metric"], (str, type(None))) else params["metric"]
        params["metric"] = [e for e in eval_metrics_builtin if e not in params["metric"]] + params["metric"]
        params["metric"] = [metric for metric in params["metric"] if metric is not None]
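        # e.g. (sketch): with params["metric"] == "l2" and eval_metric == ["l1", "mape"],
        # the merged list becomes ["l1", "mape", "l2"].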
        if not isinstance(X, pd_DataFrame):
            _X, _y = _LGBMValidateData(
                self,
                X,
                y,
                reset=True,
                # allow any input type (this validation is done further down, in lgb.Dataset())
                accept_sparse=True,
                # do not raise an error if Inf or NaN values are found (LightGBM handles these internally)
                ensure_all_finite=False,
                # raise an error on 0-row and 1-row inputs
                ensure_min_samples=2,
            )
            if sample_weight is not None:
                sample_weight = _LGBMCheckSampleWeight(sample_weight, _X)
        else:
            _X, _y = X, y

            # for other data types, setting n_features_in_ is handled by _LGBMValidateData() in the branch above
            self.n_features_in_ = _X.shape[1]

        if self._class_weight is None:
            self._class_weight = self.class_weight
        if self._class_weight is not None:
            class_sample_weight = _LGBMComputeSampleWeight(self._class_weight, y)
            if sample_weight is None or len(sample_weight) == 0:
                sample_weight = class_sample_weight
            else:
                sample_weight = np.multiply(sample_weight, class_sample_weight)

        train_set = Dataset(
            data=_X,
            label=_y,
            weight=sample_weight,
            group=group,
            init_score=init_score,
            categorical_feature=categorical_feature,
            feature_name=feature_name,
            params=params,
        )

        valid_sets: List[Dataset] = []
        if eval_set is not None:
            if isinstance(eval_set, tuple):
                eval_set = [eval_set]
            for i, valid_data in enumerate(eval_set):
                # reuse the training Dataset when a validation set is the training data, to reduce cost
                if valid_data[0] is X and valid_data[1] is y:
                    valid_set = train_set
                else:
                    valid_weight = _extract_evaluation_meta_data(
                        collection=eval_sample_weight,
                        name="eval_sample_weight",
                        i=i,
                    )
                    valid_class_weight = _extract_evaluation_meta_data(
                        collection=eval_class_weight,
                        name="eval_class_weight",
                        i=i,
                    )
                    if valid_class_weight is not None:
                        if isinstance(valid_class_weight, dict) and self._class_map is not None:
                            valid_class_weight = {self._class_map[k]: v for k, v in valid_class_weight.items()}
                        valid_class_sample_weight = _LGBMComputeSampleWeight(valid_class_weight, valid_data[1])
                        if valid_weight is None or len(valid_weight) == 0:
                            valid_weight = valid_class_sample_weight
                        else:
                            valid_weight = np.multiply(valid_weight, valid_class_sample_weight)
                    valid_init_score = _extract_evaluation_meta_data(
                        collection=eval_init_score,
                        name="eval_init_score",
                        i=i,
                    )
                    valid_group = _extract_evaluation_meta_data(
                        collection=eval_group,
                        name="eval_group",
                        i=i,
                    )
                    valid_set = Dataset(
                        data=valid_data[0],
                        label=valid_data[1],
                        weight=valid_weight,
                        group=valid_group,
                        init_score=valid_init_score,
                        categorical_feature="auto",
                        params=params,
                    )

                valid_sets.append(valid_set)

        if isinstance(init_model, LGBMModel):
            init_model = init_model.booster_

        if callbacks is None:
            callbacks = []
        else:
            callbacks = copy.copy(callbacks)  # don't use deepcopy here to allow non-serializable objects

        evals_result: _EvalResultDict = {}
        callbacks.append(record_evaluation(evals_result))

        self._Booster = train(
            params=params,
            train_set=train_set,
            num_boost_round=self.n_estimators,
            valid_sets=valid_sets,
            valid_names=eval_names,
            feval=eval_metrics_callable,  # type: ignore[arg-type]
            init_model=init_model,
            callbacks=callbacks,
        )

        # This populates the property self.n_features_, the number of features in the fitted model,
        # and so should only be set after fitting.
        #
        # The related property self._n_features_in, which populates self.n_features_in_,
        # is set BEFORE fitting.
        self._n_features = self._Booster.num_feature()

        self._evals_result = evals_result
        self._best_iteration = self._Booster.best_iteration
        self._best_score = self._Booster.best_score

        self.fitted_ = True

        # free dataset
        self._Booster.free_dataset()
        del train_set, valid_sets
        return self

    fit.__doc__ = (
        _lgbmmodel_doc_fit.format(
            X_shape="numpy array, pandas DataFrame, scipy.sparse, list of lists of int or float of shape = [n_samples, n_features]",
            y_shape="numpy array, pandas DataFrame, pandas Series, list of int or float of shape = [n_samples]",
            sample_weight_shape="numpy array, pandas Series, list of int or float of shape = [n_samples] or None, optional (default=None)",
            init_score_shape="numpy array, pandas DataFrame, pandas Series, list of int or float of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task) or shape = [n_samples, n_classes] (for multi-class task) or None, optional (default=None)",
            group_shape="numpy array, pandas Series, list of int or float, or None, optional (default=None)",
            eval_sample_weight_shape="list of array (same types as ``sample_weight`` supports), or None, optional (default=None)",
            eval_init_score_shape="list of array (same types as ``init_score`` supports), or None, optional (default=None)",
            eval_group_shape="list of array (same types as ``group`` supports), or None, optional (default=None)",
        )
        + "\n\n"
        + _lgbmmodel_doc_custom_eval_note
    )
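
    # Usage sketch (hypothetical data; X_val and y_val are a held-out split):
    #
    #     model = LGBMRegressor(n_estimators=100)
    #     model.fit(X, y, eval_set=[(X_val, y_val)], eval_metric="l1")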

    def predict(
        self,
        X: _LGBM_ScikitMatrixLike,
        raw_score: bool = False,
        start_iteration: int = 0,
        num_iteration: Optional[int] = None,
        pred_leaf: bool = False,
        pred_contrib: bool = False,
        validate_features: bool = False,
        **kwargs: Any,
    ):
        """Docstring is set after definition, using a template."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("Estimator not fitted, call fit before exploiting the model.")
        if not isinstance(X, pd_DataFrame):
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
            X = _LGBMValidateData(
                self,
                X,
                # 'y' being omitted = run scikit-learn's check_array() instead of check_X_y()
                #
                # Prevent scikit-learn from deleting or modifying attributes like 'feature_names_in_' and 'n_features_in_'.
                # These shouldn't be changed at predict() time.
                reset=False,
                # allow any input type (this validation is done further down, in lgb.Dataset())
                accept_sparse=True,
                # do not raise an error if Inf or NaN values are found (LightGBM handles these internally)
                ensure_all_finite=False,
                # raise an error on 0-row inputs
                ensure_min_samples=1,
            )
        # retrieve original params that may be used in both training and prediction
        # and then overwrite them (considering aliases) with params that were passed directly in prediction
        predict_params = self._process_params(stage="predict")
        for alias in _ConfigAliases.get_by_alias(
            "data",
            "X",
            "raw_score",
            "start_iteration",
            "num_iteration",
            "pred_leaf",
            "pred_contrib",
            *kwargs.keys(),
        ):
            predict_params.pop(alias, None)
        predict_params.update(kwargs)
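        # e.g. any alias of 'num_iteration' stored on the estimator is popped above,
        # so the num_iteration keyword argument of predict() always takes effect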

        # the number of threads can take values with special meaning that apply only
        # in the scikit-learn interface; these should not reach the C++ side as-is
        predict_params = _choose_param_value("num_threads", predict_params, self.n_jobs)
        predict_params["num_threads"] = self._process_n_jobs(predict_params["num_threads"])
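        # (e.g. n_jobs=None and negative values follow scikit-learn/joblib conventions and are
        # resolved to a concrete thread count here, before being handed to LightGBM as num_threads)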

        return self._Booster.predict(  # type: ignore[union-attr]
            X,
            raw_score=raw_score,
            start_iteration=start_iteration,
            num_iteration=num_iteration,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            validate_features=validate_features,
            **predict_params,
        )

    predict.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted value for each sample.",
        X_shape="numpy array, pandas DataFrame, scipy.sparse, list of lists of int or float of shape = [n_samples, n_features]",
        output_name="predicted_result",
        predicted_result_shape="array-like of shape = [n_samples] or shape = [n_samples, n_classes]",
        X_leaves_shape="array-like of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]",
        X_SHAP_values_shape="array-like of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes] or list with n_classes length of such objects",
    )

    @property
    def n_features_(self) -> int:
        """:obj:`int`: The number of features of the fitted model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No n_features found. Need to call fit beforehand.")
        return self._n_features

    @property
    def n_features_in_(self) -> int:
        """:obj:`int`: The number of features of the fitted model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No n_features_in found. Need to call fit beforehand.")
        return self._n_features_in

    @n_features_in_.setter
    def n_features_in_(self, value: int) -> None:
        """Set number of features found in passed-in dataset.

        Starting with ``scikit-learn`` 1.6, ``scikit-learn`` expects to be able to directly
        set this property in functions like ``validate_data()``.

        .. note::

            Do not call ``estimator.n_features_in_ = some_int`` or anything else that invokes
            this method. It is only here for compatibility with ``scikit-learn`` validation
            functions used internally in ``lightgbm``.
        """
        self._n_features_in = value

    @property
    def best_score_(self) -> _LGBM_BoosterBestScoreType:
        """:obj:`dict`: The best score of the fitted model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No best_score found. Need to call fit beforehand.")
        return self._best_score

    @property
    def best_iteration_(self) -> int:
        """:obj:`int`: The best iteration of the fitted model if ``early_stopping()`` callback has been specified."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError(
                "No best_iteration found. Need to call fit with early_stopping callback beforehand."
            )
        return self._best_iteration

    @property
    def objective_(self) -> Union[str, _LGBM_ScikitCustomObjectiveFunction]:
        """:obj:`str` or :obj:`callable`: The concrete objective used while fitting this model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No objective found. Need to call fit beforehand.")
        return self._objective  # type: ignore[return-value]

    @property
    def n_estimators_(self) -> int:
        """:obj:`int`: True number of boosting iterations performed.

        This might be less than parameter ``n_estimators`` if early stopping was enabled or
        if boosting stopped early due to limits on complexity like ``min_gain_to_split``.

        .. versionadded:: 4.0.0
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No n_estimators found. Need to call fit beforehand.")
        return self._Booster.current_iteration()  # type: ignore

    @property
    def n_iter_(self) -> int:
        """:obj:`int`: True number of boosting iterations performed.

        This might be less than parameter ``n_estimators`` if early stopping was enabled or
        if boosting stopped early due to limits on complexity like ``min_gain_to_split``.

        .. versionadded:: 4.0.0
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No n_iter found. Need to call fit beforehand.")
        return self._Booster.current_iteration()  # type: ignore

    @property
    def booster_(self) -> Booster:
        """Booster: The underlying Booster of this model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No booster found. Need to call fit beforehand.")
        return self._Booster  # type: ignore[return-value]

    @property
    def evals_result_(self) -> _EvalResultDict:
        """:obj:`dict`: The evaluation results if validation sets have been specified."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No results found. Need to call fit with eval_set beforehand.")
        return self._evals_result

    @property
    def feature_importances_(self) -> np.ndarray:
        """:obj:`array` of shape = [n_features]: The feature importances (the higher, the more important).

        .. note::

            ``importance_type`` attribute is passed to the function
            to configure the type of importance values to be extracted.
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No feature_importances found. Need to call fit beforehand.")
        return self._Booster.feature_importance(importance_type=self.importance_type)  # type: ignore[union-attr]

    @property
    def feature_name_(self) -> List[str]:
        """:obj:`list` of shape = [n_features]: The names of features.

        .. note::

            If input does not contain feature names, they will be added during fitting in the format ``Column_0``, ``Column_1``, ..., ``Column_N``.
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No feature_name found. Need to call fit beforehand.")
        return self._Booster.feature_name()  # type: ignore[union-attr]

    @property
    def feature_names_in_(self) -> np.ndarray:
        """:obj:`array` of shape = [n_features]: scikit-learn compatible version of ``.feature_name_``.

        .. versionadded:: 4.5.0
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No feature_names_in_ found. Need to call fit beforehand.")
        return np.array(self.feature_name_)

    @feature_names_in_.deleter
    def feature_names_in_(self) -> None:
        """Intercept calls to delete ``feature_names_in_``.

        Some code paths in ``scikit-learn`` try to delete the ``feature_names_in_`` attribute
        on estimators when a new training dataset that doesn't have feature names is passed.
        LightGBM automatically assigns feature names to such datasets
        (like ``Column_0``, ``Column_1``, etc.) and so does not want that behavior.

        However, that behavior is coupled to ``scikit-learn`` automatically updating
        ``n_features_in_`` in those same code paths, which is necessary for compliance
        with its API (via argument ``reset`` to functions like ``validate_data()`` and
        ``check_array()``).

        .. note::

            Do not call ``del estimator.feature_names_in_`` or anything else that invokes
            this method. It is only here for compatibility with ``scikit-learn`` validation
            functions used internally in ``lightgbm``.
        """
        pass


class LGBMRegressor(_LGBMRegressorBase, LGBMModel):
    """LightGBM regressor."""

    # NOTE: all args from LGBMModel.__init__() are intentionally repeated here for
    #       docs, help(), and tab completion.
    def __init__(
        self,
        *,
        boosting_type: str = "gbdt",
        num_leaves: int = 31,
        max_depth: int = -1,
        learning_rate: float = 0.1,
        n_estimators: int = 100,
        subsample_for_bin: int = 200000,
        objective: Optional[Union[str, _LGBM_ScikitCustomObjectiveFunction]] = None,
        class_weight: Optional[Union[Dict, str]] = None,
        min_split_gain: float = 0.0,
        min_child_weight: float = 1e-3,
        min_child_samples: int = 20,
        subsample: float = 1.0,
        subsample_freq: int = 0,
        colsample_bytree: float = 1.0,
        reg_alpha: float = 0.0,
        reg_lambda: float = 0.0,
        random_state: Optional[Union[int, np.random.RandomState, np.random.Generator]] = None,
        n_jobs: Optional[int] = None,
        importance_type: str = "split",
        **kwargs: Any,
    ) -> None:
        super().__init__(
            boosting_type=boosting_type,
            num_leaves=num_leaves,
            max_depth=max_depth,
            learning_rate=learning_rate,
            n_estimators=n_estimators,
            subsample_for_bin=subsample_for_bin,
            objective=objective,
            class_weight=class_weight,
            min_split_gain=min_split_gain,
            min_child_weight=min_child_weight,
            min_child_samples=min_child_samples,
            subsample=subsample,
            subsample_freq=subsample_freq,
            colsample_bytree=colsample_bytree,
            reg_alpha=reg_alpha,
            reg_lambda=reg_lambda,
            random_state=random_state,
            n_jobs=n_jobs,
            importance_type=importance_type,
            **kwargs,
        )

    __init__.__doc__ = LGBMModel.__init__.__doc__

    def _more_tags(self) -> Dict[str, Any]:
        # handle the case where RegressorMixin possibly provides _more_tags()
        if callable(getattr(_LGBMRegressorBase, "_more_tags", None)):
            tags = _LGBMRegressorBase._more_tags(self)
        else:
            tags = {}
        # override those with LightGBM-specific preferences
        tags.update(LGBMModel._more_tags(self))
        return tags

    def __sklearn_tags__(self) -> "_sklearn_Tags":
        return super().__sklearn_tags__()

    def fit(  # type: ignore[override]
        self,
        X: _LGBM_ScikitMatrixLike,
        y: _LGBM_LabelType,
        sample_weight: Optional[_LGBM_WeightType] = None,
        init_score: Optional[_LGBM_InitScoreType] = None,
        eval_set: Optional[List[_LGBM_ScikitValidSet]] = None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight: Optional[List[_LGBM_WeightType]] = None,
        eval_init_score: Optional[List[_LGBM_InitScoreType]] = None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        feature_name: _LGBM_FeatureNameConfiguration = "auto",
        categorical_feature: _LGBM_CategoricalFeatureConfiguration = "auto",
        callbacks: Optional[List[Callable]] = None,
        init_model: Optional[Union[str, Path, Booster, LGBMModel]] = None,
    ) -> "LGBMRegressor":
        """Docstring is inherited from the LGBMModel."""
        super().fit(
            X,
            y,
            sample_weight=sample_weight,
            init_score=init_score,
            eval_set=eval_set,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_init_score=eval_init_score,
            eval_metric=eval_metric,
            feature_name=feature_name,
            categorical_feature=categorical_feature,
            callbacks=callbacks,
            init_model=init_model,
        )
        return self

    _base_doc = LGBMModel.fit.__doc__.replace("self : LGBMModel", "self : LGBMRegressor")  # type: ignore
    _base_doc = (
        _base_doc[: _base_doc.find("group :")]  # type: ignore
        + _base_doc[_base_doc.find("eval_set :") :]
    )  # type: ignore
    _base_doc = _base_doc[: _base_doc.find("eval_class_weight :")] + _base_doc[_base_doc.find("eval_init_score :") :]
    fit.__doc__ = _base_doc[: _base_doc.find("eval_group :")] + _base_doc[_base_doc.find("eval_metric :") :]
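    # the inherited docstring is trimmed above: ranking-only parameters (group, eval_group)
    # and the classification-only eval_class_weight do not apply to LGBMRegressor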


class LGBMClassifier(_LGBMClassifierBase, LGBMModel):
    """LightGBM classifier."""

    # NOTE: all args from LGBMModel.__init__() are intentionally repeated here for
    #       docs, help(), and tab completion.
    def __init__(
        self,
        *,
        boosting_type: str = "gbdt",
        num_leaves: int = 31,
        max_depth: int = -1,
        learning_rate: float = 0.1,
        n_estimators: int = 100,
        subsample_for_bin: int = 200000,
        objective: Optional[Union[str, _LGBM_ScikitCustomObjectiveFunction]] = None,
        class_weight: Optional[Union[Dict, str]] = None,
        min_split_gain: float = 0.0,
        min_child_weight: float = 1e-3,
        min_child_samples: int = 20,
        subsample: float = 1.0,
        subsample_freq: int = 0,
        colsample_bytree: float = 1.0,
        reg_alpha: float = 0.0,
        reg_lambda: float = 0.0,
        random_state: Optional[Union[int, np.random.RandomState, np.random.Generator]] = None,
        n_jobs: Optional[int] = None,
        importance_type: str = "split",
        **kwargs: Any,
    ) -> None:
        super().__init__(
            boosting_type=boosting_type,
            num_leaves=num_leaves,
            max_depth=max_depth,
            learning_rate=learning_rate,
            n_estimators=n_estimators,
            subsample_for_bin=subsample_for_bin,
            objective=objective,
            class_weight=class_weight,
            min_split_gain=min_split_gain,
            min_child_weight=min_child_weight,
            min_child_samples=min_child_samples,
            subsample=subsample,
            subsample_freq=subsample_freq,
            colsample_bytree=colsample_bytree,
            reg_alpha=reg_alpha,
            reg_lambda=reg_lambda,
            random_state=random_state,
            n_jobs=n_jobs,
            importance_type=importance_type,
            **kwargs,
        )

    __init__.__doc__ = LGBMModel.__init__.__doc__

    def _more_tags(self) -> Dict[str, Any]:
        # handle the case where ClassifierMixin possibly provides _more_tags()
        if callable(getattr(_LGBMClassifierBase, "_more_tags", None)):
            tags = _LGBMClassifierBase._more_tags(self)
        else:
            tags = {}
        # override those with LightGBM-specific preferences
        tags.update(LGBMModel._more_tags(self))
        return tags

    def __sklearn_tags__(self) -> "_sklearn_Tags":
        tags = super().__sklearn_tags__()
        tags.classifier_tags.multi_class = True
        tags.classifier_tags.multi_label = False
        return tags

    def fit(  # type: ignore[override]
        self,
        X: _LGBM_ScikitMatrixLike,
        y: _LGBM_LabelType,
        sample_weight: Optional[_LGBM_WeightType] = None,
        init_score: Optional[_LGBM_InitScoreType] = None,
        eval_set: Optional[List[_LGBM_ScikitValidSet]] = None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight: Optional[List[_LGBM_WeightType]] = None,
        eval_class_weight: Optional[List[float]] = None,
        eval_init_score: Optional[List[_LGBM_InitScoreType]] = None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        feature_name: _LGBM_FeatureNameConfiguration = "auto",
        categorical_feature: _LGBM_CategoricalFeatureConfiguration = "auto",
        callbacks: Optional[List[Callable]] = None,
        init_model: Optional[Union[str, Path, Booster, LGBMModel]] = None,
    ) -> "LGBMClassifier":
        """Docstring is inherited from the LGBMModel."""
        _LGBMAssertAllFinite(y)
        _LGBMCheckClassificationTargets(y)
        self._le = _LGBMLabelEncoder().fit(y)
        _y = self._le.transform(y)
        self._class_map = dict(zip(self._le.classes_, self._le.transform(self._le.classes_)))
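        # self._class_map maps original labels to encoded values, e.g. ["a", "b"] -> {"a": 0, "b": 1}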
        if isinstance(self.class_weight, dict):
            self._class_weight = {self._class_map[k]: v for k, v in self.class_weight.items()}

        self._classes = self._le.classes_
        self._n_classes = len(self._classes)  # type: ignore[arg-type]
        if self.objective is None:
            self._objective = None

        # adjust eval metrics to match whether binary or multiclass
        # classification is being performed
        if not callable(eval_metric):
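            # normalize eval_metric to a list so its entries can be rewritten in place below
            # (e.g. with 3 classes, eval_metric="binary_logloss" is remapped to "multi_logloss")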
            if isinstance(eval_metric, list):
                eval_metric_list = copy.deepcopy(eval_metric)
            elif isinstance(eval_metric, str):
                eval_metric_list = [eval_metric]
            else:
                eval_metric_list = []
            if self.__is_multiclass:
                for index, metric in enumerate(eval_metric_list):
                    if metric in {"logloss", "binary_logloss"}:
                        eval_metric_list[index] = "multi_logloss"
                    elif metric in {"error", "binary_error"}:
                        eval_metric_list[index] = "multi_error"
            else:
                for index, metric in enumerate(eval_metric_list):
                    if metric in {"logloss", "multi_logloss"}:
                        eval_metric_list[index] = "binary_logloss"
                    elif metric in {"error", "multi_error"}:
                        eval_metric_list[index] = "binary_error"
            eval_metric = eval_metric_list

        # do not modify args, as it causes errors in model selection tools
        valid_sets: Optional[List[_LGBM_ScikitValidSet]] = None
        if eval_set is not None:
            if isinstance(eval_set, tuple):
                eval_set = [eval_set]
            valid_sets = []
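            # re-use the already-encoded labels when a validation set is the training set itself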
            for valid_x, valid_y in eval_set:
                if valid_x is X and valid_y is y:
                    valid_sets.append((valid_x, _y))
                else:
                    valid_sets.append((valid_x, self._le.transform(valid_y)))

        super().fit(
            X,
            _y,
            sample_weight=sample_weight,
            init_score=init_score,
            eval_set=valid_sets,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_class_weight=eval_class_weight,
            eval_init_score=eval_init_score,
            eval_metric=eval_metric,
            feature_name=feature_name,
            categorical_feature=categorical_feature,
            callbacks=callbacks,
            init_model=init_model,
        )
        return self

    _base_doc = LGBMModel.fit.__doc__.replace("self : LGBMModel", "self : LGBMClassifier")  # type: ignore
    _base_doc = (
        _base_doc[: _base_doc.find("group :")]  # type: ignore
        + _base_doc[_base_doc.find("eval_set :") :]
    )  # type: ignore
    fit.__doc__ = _base_doc[: _base_doc.find("eval_group :")] + _base_doc[_base_doc.find("eval_metric :") :]

    def predict(
        self,
        X: _LGBM_ScikitMatrixLike,
        raw_score: bool = False,
        start_iteration: int = 0,
        num_iteration: Optional[int] = None,
        pred_leaf: bool = False,
        pred_contrib: bool = False,
        validate_features: bool = False,
        **kwargs: Any,
    ):
        """Docstring is inherited from the LGBMModel."""
        result = self.predict_proba(
            X=X,
            raw_score=raw_score,
            start_iteration=start_iteration,
            num_iteration=num_iteration,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            validate_features=validate_features,
            **kwargs,
        )
        if callable(self._objective) or raw_score or pred_leaf or pred_contrib:
            return result
        else:
            class_index = np.argmax(result, axis=1)
            return self._le.inverse_transform(class_index)

    predict.__doc__ = LGBMModel.predict.__doc__

    def predict_proba(
        self,
        X: _LGBM_ScikitMatrixLike,
        raw_score: bool = False,
        start_iteration: int = 0,
        num_iteration: Optional[int] = None,
        pred_leaf: bool = False,
        pred_contrib: bool = False,
        validate_features: bool = False,
        **kwargs: Any,
    ):
        """Docstring is set after definition, using a template."""
        result = super().predict(
            X=X,
            raw_score=raw_score,
            start_iteration=start_iteration,
            num_iteration=num_iteration,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            validate_features=validate_features,
            **kwargs,
        )
        if callable(self._objective) and not (raw_score or pred_leaf or pred_contrib):
            _log_warning(
                "Cannot compute class probabilities or labels "
                "due to the usage of customized objective function.\n"
                "Returning raw scores instead."
            )
            return result
        elif self.__is_multiclass or raw_score or pred_leaf or pred_contrib:  # type: ignore [operator]
            return result
        else:
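            # binary case: the Booster returns P(class 1) with shape [n_samples];
            # stack into the scikit-learn convention [P(class 0), P(class 1)] of shape [n_samples, 2]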
            return np.vstack((1.0 - result, result)).transpose()

    predict_proba.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted probability for each class for each sample.",
        X_shape="numpy array, pandas DataFrame, scipy.sparse, list of lists of int or float of shape = [n_samples, n_features]",
        output_name="predicted_probability",
        predicted_result_shape="array-like of shape = [n_samples] or shape = [n_samples, n_classes]",
        X_leaves_shape="array-like of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]",
        X_SHAP_values_shape="array-like of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes] or list with n_classes length of such objects",
    )

    @property
    def classes_(self) -> np.ndarray:
        """:obj:`array` of shape = [n_classes]: The class label array."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No classes found. Need to call fit beforehand.")
        return self._classes  # type: ignore[return-value]

    @property
    def n_classes_(self) -> int:
        """:obj:`int`: The number of classes."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No classes found. Need to call fit beforehand.")
        return self._n_classes

    @property
    def __is_multiclass(self) -> bool:
        """:obj:`bool`:  Indicator of whether the classifier is used for multiclass."""
        return self._n_classes > 2 or (isinstance(self._objective, str) and self._objective in _MULTICLASS_OBJECTIVES)


class LGBMRanker(LGBMModel):
    """LightGBM ranker.

    .. warning::

        scikit-learn doesn't support ranking applications yet,
        therefore this class is not really compatible with the sklearn ecosystem.
        Please use this class mainly for training and applying ranking models in a common scikit-learn-like way.
    """

    # NOTE: all args from LGBMModel.__init__() are intentionally repeated here for
    #       docs, help(), and tab completion.
    def __init__(
        self,
        *,
        boosting_type: str = "gbdt",
        num_leaves: int = 31,
        max_depth: int = -1,
        learning_rate: float = 0.1,
        n_estimators: int = 100,
        subsample_for_bin: int = 200000,
        objective: Optional[Union[str, _LGBM_ScikitCustomObjectiveFunction]] = None,
        class_weight: Optional[Union[Dict, str]] = None,
        min_split_gain: float = 0.0,
        min_child_weight: float = 1e-3,
        min_child_samples: int = 20,
        subsample: float = 1.0,
        subsample_freq: int = 0,
        colsample_bytree: float = 1.0,
        reg_alpha: float = 0.0,
        reg_lambda: float = 0.0,
        random_state: Optional[Union[int, np.random.RandomState, np.random.Generator]] = None,
        n_jobs: Optional[int] = None,
        importance_type: str = "split",
        **kwargs: Any,
    ) -> None:
        super().__init__(
            boosting_type=boosting_type,
            num_leaves=num_leaves,
            max_depth=max_depth,
            learning_rate=learning_rate,
            n_estimators=n_estimators,
            subsample_for_bin=subsample_for_bin,
            objective=objective,
            class_weight=class_weight,
            min_split_gain=min_split_gain,
            min_child_weight=min_child_weight,
            min_child_samples=min_child_samples,
            subsample=subsample,
            subsample_freq=subsample_freq,
            colsample_bytree=colsample_bytree,
            reg_alpha=reg_alpha,
            reg_lambda=reg_lambda,
            random_state=random_state,
            n_jobs=n_jobs,
            importance_type=importance_type,
            **kwargs,
        )

    __init__.__doc__ = LGBMModel.__init__.__doc__

    def fit(  # type: ignore[override]
        self,
        X: _LGBM_ScikitMatrixLike,
        y: _LGBM_LabelType,
        sample_weight: Optional[_LGBM_WeightType] = None,
        init_score: Optional[_LGBM_InitScoreType] = None,
        group: Optional[_LGBM_GroupType] = None,
        eval_set: Optional[List[_LGBM_ScikitValidSet]] = None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight: Optional[List[_LGBM_WeightType]] = None,
        eval_init_score: Optional[List[_LGBM_InitScoreType]] = None,
        eval_group: Optional[List[_LGBM_GroupType]] = None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        eval_at: Union[List[int], Tuple[int, ...]] = (1, 2, 3, 4, 5),
        feature_name: _LGBM_FeatureNameConfiguration = "auto",
        categorical_feature: _LGBM_CategoricalFeatureConfiguration = "auto",
        callbacks: Optional[List[Callable]] = None,
        init_model: Optional[Union[str, Path, Booster, LGBMModel]] = None,
    ) -> "LGBMRanker":
        """Docstring is inherited from the LGBMModel."""
        # check group data
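        # (group sizes are per-query row counts, e.g. group=[10, 20, 30] means the first 10 rows
        # form the first query, the next 20 the second, and so on; sum(group) must equal n_samples)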
        if group is None:
            raise ValueError("Should set group for ranking task")

        if eval_set is not None:
            if eval_group is None:
                raise ValueError("Eval_group cannot be None when eval_set is not None")
            elif len(eval_group) != len(eval_set):
                raise ValueError("Length of eval_group should be equal to length of eval_set")
            elif (
                isinstance(eval_group, dict)
                and any(i not in eval_group or eval_group[i] is None for i in range(len(eval_group)))
                or isinstance(eval_group, list)
                and any(group is None for group in eval_group)
            ):
                raise ValueError(
                    "Should set group for all eval datasets for ranking task; "
                    "if you use dict, the index should start from 0"
                )

        self._eval_at = eval_at
        super().fit(
            X,
            y,
            sample_weight=sample_weight,
            init_score=init_score,
            group=group,
            eval_set=eval_set,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_init_score=eval_init_score,
            eval_group=eval_group,
            eval_metric=eval_metric,
            feature_name=feature_name,
            categorical_feature=categorical_feature,
            callbacks=callbacks,
            init_model=init_model,
        )
        return self

    _base_doc = LGBMModel.fit.__doc__.replace("self : LGBMModel", "self : LGBMRanker")  # type: ignore
    fit.__doc__ = (
        _base_doc[: _base_doc.find("eval_class_weight :")]  # type: ignore
        + _base_doc[_base_doc.find("eval_init_score :") :]
    )  # type: ignore
    _base_doc = fit.__doc__
    _before_feature_name, _feature_name, _after_feature_name = _base_doc.partition("feature_name :")
    fit.__doc__ = f"""{_before_feature_name}eval_at : list or tuple of int, optional (default=(1, 2, 3, 4, 5))
        The evaluation positions of the specified metric.
    {_feature_name}{_after_feature_name}"""