# coding: utf-8
"""Scikit-learn wrapper interface for LightGBM."""

import copy
from inspect import signature
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union

import numpy as np
import scipy.sparse

from .basic import (
    _MULTICLASS_OBJECTIVES,
    Booster,
    Dataset,
    LightGBMError,
    _choose_param_value,
    _ConfigAliases,
    _LGBM_BoosterBestScoreType,
    _LGBM_CategoricalFeatureConfiguration,
    _LGBM_EvalFunctionResultType,
    _LGBM_FeatureNameConfiguration,
    _LGBM_GroupType,
    _LGBM_InitScoreType,
    _LGBM_LabelType,
    _LGBM_WeightType,
    _log_warning,
)
from .callback import _EvalResultDict, record_evaluation
from .compat import (
    SKLEARN_INSTALLED,
    LGBMNotFittedError,
    _LGBMAssertAllFinite,
    _LGBMCheckClassificationTargets,
    _LGBMCheckSampleWeight,
    _LGBMClassifierBase,
    _LGBMComputeSampleWeight,
    _LGBMCpuCount,
    _LGBMLabelEncoder,
    _LGBMModelBase,
    _LGBMRegressorBase,
    _LGBMValidateData,
    _sklearn_version,
    dt_DataTable,
    pd_DataFrame,
)
from .engine import train

if TYPE_CHECKING:
    from .compat import _sklearn_Tags


__all__ = [
    "LGBMClassifier",
    "LGBMModel",
    "LGBMRanker",
    "LGBMRegressor",
]

_LGBM_ScikitMatrixLike = Union[
    dt_DataTable,
    List[Union[List[float], List[int]]],
    np.ndarray,
    pd_DataFrame,
    scipy.sparse.spmatrix,
]
_LGBM_ScikitCustomObjectiveFunction = Union[
    # f(labels, preds)
    Callable[
        [Optional[np.ndarray], np.ndarray],
        Tuple[np.ndarray, np.ndarray],
    ],
    # f(labels, preds, weights)
    Callable[
        [Optional[np.ndarray], np.ndarray, Optional[np.ndarray]],
        Tuple[np.ndarray, np.ndarray],
    ],
    # f(labels, preds, weights, group)
    Callable[
        [Optional[np.ndarray], np.ndarray, Optional[np.ndarray], Optional[np.ndarray]],
        Tuple[np.ndarray, np.ndarray],
    ],
]
_LGBM_ScikitCustomEvalFunction = Union[
    # f(labels, preds)
    Callable[
        [Optional[np.ndarray], np.ndarray],
        _LGBM_EvalFunctionResultType,
    ],
    Callable[
        [Optional[np.ndarray], np.ndarray],
        List[_LGBM_EvalFunctionResultType],
    ],
    # f(labels, preds, weights)
    Callable[
        [Optional[np.ndarray], np.ndarray, Optional[np.ndarray]],
        _LGBM_EvalFunctionResultType,
    ],
    Callable[
        [Optional[np.ndarray], np.ndarray, Optional[np.ndarray]],
        List[_LGBM_EvalFunctionResultType],
    ],
    # f(labels, preds, weights, group)
    Callable[
        [Optional[np.ndarray], np.ndarray, Optional[np.ndarray], Optional[np.ndarray]],
        _LGBM_EvalFunctionResultType,
    ],
    Callable[
        [Optional[np.ndarray], np.ndarray, Optional[np.ndarray], Optional[np.ndarray]],
        List[_LGBM_EvalFunctionResultType],
    ],
]
_LGBM_ScikitEvalMetricType = Union[
    str,
    _LGBM_ScikitCustomEvalFunction,
    List[Union[str, _LGBM_ScikitCustomEvalFunction]],
]
_LGBM_ScikitValidSet = Tuple[_LGBM_ScikitMatrixLike, _LGBM_LabelType]


def _get_group_from_constructed_dataset(dataset: Dataset) -> Optional[np.ndarray]:
    group = dataset.get_group()
    error_msg = (
        "Estimators in lightgbm.sklearn should only retrieve query groups from a constructed Dataset. "
        "If you're seeing this message, it's a bug in lightgbm. Please report it at https://github.com/microsoft/LightGBM/issues."
    )
    assert group is None or isinstance(group, np.ndarray), error_msg
    return group


def _get_label_from_constructed_dataset(dataset: Dataset) -> np.ndarray:
    label = dataset.get_label()
    error_msg = (
        "Estimators in lightgbm.sklearn should only retrieve labels from a constructed Dataset. "
        "If you're seeing this message, it's a bug in lightgbm. Please report it at https://github.com/microsoft/LightGBM/issues."
    )
    assert isinstance(label, np.ndarray), error_msg
    return label


def _get_weight_from_constructed_dataset(dataset: Dataset) -> Optional[np.ndarray]:
    weight = dataset.get_weight()
    error_msg = (
        "Estimators in lightgbm.sklearn should only retrieve weights from a constructed Dataset. "
        "If you're seeing this message, it's a bug in lightgbm. Please report it at https://github.com/microsoft/LightGBM/issues."
    )
    assert weight is None or isinstance(weight, np.ndarray), error_msg
    return weight


class _ObjectiveFunctionWrapper:
    """Proxy class for objective function."""

    def __init__(self, func: _LGBM_ScikitCustomObjectiveFunction):
        """Construct a proxy class.

        This class transforms objective function to match objective function with signature ``new_func(preds, dataset)``
        as expected by ``lightgbm.engine.train``.

        Parameters
        ----------
        func : callable
            Expects a callable with following signatures:
            ``func(y_true, y_pred)``,
            ``func(y_true, y_pred, weight)``
            or ``func(y_true, y_pred, weight, group)``
            and returns (grad, hess):

                y_true : numpy 1-D array of shape = [n_samples]
                    The target values.
                y_pred : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                    The predicted values.
                    Predicted values are returned before any transformation,
                    e.g. they are raw margin instead of probability of positive class for binary task.
                weight : numpy 1-D array of shape = [n_samples]
                    The weight of samples. Weights should be non-negative.
                group : numpy 1-D array
                    Group/query data.
                    Only used in the learning-to-rank task.
                    sum(group) = n_samples.
                    For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
                    where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
                grad : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                    The value of the first order derivative (gradient) of the loss
                    with respect to the elements of y_pred for each sample point.
                hess : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                    The value of the second order derivative (Hessian) of the loss
                    with respect to the elements of y_pred for each sample point.

        .. note::

            For multi-class task, y_pred is a numpy 2-D array of shape = [n_samples, n_classes],
            and grad and hess should be returned in the same format.
        """
        self.func = func

    def __call__(
        self,
        preds: np.ndarray,
        dataset: Dataset,
    ) -> Tuple[np.ndarray, np.ndarray]:
        """Call passed function with appropriate arguments.

        Parameters
        ----------
        preds : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The predicted values.
        dataset : Dataset
            The training dataset.

        Returns
        -------
        grad : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The value of the first order derivative (gradient) of the loss
            with respect to the elements of preds for each sample point.
        hess : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The value of the second order derivative (Hessian) of the loss
            with respect to the elements of preds for each sample point.
        """
        labels = _get_label_from_constructed_dataset(dataset)
        argc = len(signature(self.func).parameters)
        if argc == 2:
            grad, hess = self.func(labels, preds)  # type: ignore[call-arg]
            return grad, hess

        weight = _get_weight_from_constructed_dataset(dataset)
        if argc == 3:
            grad, hess = self.func(labels, preds, weight)  # type: ignore[call-arg]
            return grad, hess

        if argc == 4:
            group = _get_group_from_constructed_dataset(dataset)
            return self.func(labels, preds, weight, group)  # type: ignore[call-arg]

        raise TypeError(f"Self-defined objective function should have 2, 3 or 4 arguments, got {argc}")


class _EvalFunctionWrapper:
    """Proxy class for evaluation function."""

    def __init__(self, func: _LGBM_ScikitCustomEvalFunction):
        """Construct a proxy class.

        This class transforms evaluation function to match evaluation function with signature ``new_func(preds, dataset)``
        as expected by ``lightgbm.engine.train``.

        Parameters
        ----------
        func : callable
            Expects a callable with following signatures:
            ``func(y_true, y_pred)``,
            ``func(y_true, y_pred, weight)``
            or ``func(y_true, y_pred, weight, group)``
            and returns (eval_name, eval_result, is_higher_better) or
            list of (eval_name, eval_result, is_higher_better):

                y_true : numpy 1-D array of shape = [n_samples]
                    The target values.
                y_pred : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                    The predicted values.
                    In case of custom ``objective``, predicted values are returned before any transformation,
                    e.g. they are raw margin instead of probability of positive class for binary task in this case.
                weight : numpy 1-D array of shape = [n_samples]
                    The weight of samples. Weights should be non-negative.
                group : numpy 1-D array
                    Group/query data.
                    Only used in the learning-to-rank task.
                    sum(group) = n_samples.
                    For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
                    where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
                eval_name : str
                    The name of evaluation function (without whitespace).
                eval_result : float
                    The eval result.
                is_higher_better : bool
                    Is eval result higher better, e.g. AUC is ``is_higher_better``.
        """
        self.func = func

    def __call__(
        self,
        preds: np.ndarray,
        dataset: Dataset,
    ) -> Union[_LGBM_EvalFunctionResultType, List[_LGBM_EvalFunctionResultType]]:
        """Call passed function with appropriate arguments.

        Parameters
        ----------
        preds : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The predicted values.
        dataset : Dataset
            The training dataset.

        Returns
        -------
        eval_name : str
            The name of evaluation function (without whitespace).
        eval_result : float
            The eval result.
        is_higher_better : bool
            Is eval result higher better, e.g. AUC is ``is_higher_better``.
        """
        labels = _get_label_from_constructed_dataset(dataset)
        argc = len(signature(self.func).parameters)
        if argc == 2:
            return self.func(labels, preds)  # type: ignore[call-arg]

        weight = _get_weight_from_constructed_dataset(dataset)
        if argc == 3:
            return self.func(labels, preds, weight)  # type: ignore[call-arg]

        if argc == 4:
            group = _get_group_from_constructed_dataset(dataset)
            return self.func(labels, preds, weight, group)  # type: ignore[call-arg]

        raise TypeError(f"Self-defined eval function should have 2, 3 or 4 arguments, got {argc}")
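
# A minimal, illustrative sketch (not part of the library API): the wrappers above
# accept callables with the ``func(y_true, y_pred[, weight[, group]])`` signatures
# documented in their docstrings. Assuming a regression task with 1-D predictions,
# a custom squared-error objective and a matching eval metric could look like:
#
#     def l2_objective(y_true, y_pred):
#         grad = y_pred - y_true  # d/dy_pred of 0.5 * (y_pred - y_true) ** 2
#         hess = np.ones_like(y_pred)  # second derivative is constant
#         return grad, hess
#
#     def l2_eval(y_true, y_pred, weight):
#         result = float(np.average((y_pred - y_true) ** 2, weights=weight))
#         return "custom_l2", result, False  # False: lower is better
#
# ``_ObjectiveFunctionWrapper`` and ``_EvalFunctionWrapper`` inspect the number of
# parameters with ``inspect.signature`` to decide whether to also pass weights and groups.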


# documentation templates for LGBMModel methods are shared between the classes in
# this module and those in the ``dask`` module

_lgbmmodel_doc_fit = """
    Build a gradient boosting model from the training set (X, y).

    Parameters
    ----------
    X : {X_shape}
        Input feature matrix.
    y : {y_shape}
        The target values (class labels in classification, real numbers in regression).
    sample_weight : {sample_weight_shape}
        Weights of training data. Weights should be non-negative.
    init_score : {init_score_shape}
        Init score of training data.
    group : {group_shape}
        Group/query data.
        Only used in the learning-to-rank task.
        sum(group) = n_samples.
        For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
        where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
    eval_set : list or None, optional (default=None)
        A list of (X, y) tuple pairs to use as validation sets.
    eval_names : list of str, or None, optional (default=None)
        Names of eval_set.
    eval_sample_weight : {eval_sample_weight_shape}
        Weights of eval data. Weights should be non-negative.
    eval_class_weight : list or None, optional (default=None)
        Class weights of eval data.
    eval_init_score : {eval_init_score_shape}
        Init score of eval data.
    eval_group : {eval_group_shape}
        Group data of eval data.
    eval_metric : str, callable, list or None, optional (default=None)
        If str, it should be a built-in evaluation metric to use.
        If callable, it should be a custom evaluation metric, see note below for more details.
        If list, it can be a list of built-in metrics, a list of custom evaluation metrics, or a mix of both.
        In either case, the ``metric`` from the model parameters will be evaluated and used as well.
        Default: 'l2' for LGBMRegressor, 'logloss' for LGBMClassifier, 'ndcg' for LGBMRanker.
    feature_name : list of str, or 'auto', optional (default='auto')
        Feature names.
        If 'auto' and data is pandas DataFrame, data columns names are used.
    categorical_feature : list of str or int, or 'auto', optional (default='auto')
        Categorical features.
        If list of int, interpreted as indices.
        If list of str, interpreted as feature names (need to specify ``feature_name`` as well).
        If 'auto' and data is pandas DataFrame, pandas unordered categorical columns are used.
        All values in categorical features will be cast to int32 and thus should be less than int32 max value (2147483647).
        Large values could be memory consuming. Consider using consecutive integers starting from zero.
        All negative values in categorical features will be treated as missing values.
        The output cannot be monotonically constrained with respect to a categorical feature.
        Floating point numbers in categorical features will be rounded towards 0.
    callbacks : list of callable, or None, optional (default=None)
        List of callback functions that are applied at each iteration.
        See Callbacks in Python API for more information.
    init_model : str, pathlib.Path, Booster, LGBMModel or None, optional (default=None)
        Filename of LightGBM model, Booster instance or LGBMModel instance used for continue training.

    Returns
    -------
    self : LGBMModel
        Returns self.
    """

_lgbmmodel_doc_custom_eval_note = """
    Note
    ----
    Custom eval function expects a callable with following signatures:
    ``func(y_true, y_pred)``, ``func(y_true, y_pred, weight)`` or
    ``func(y_true, y_pred, weight, group)``
    and returns (eval_name, eval_result, is_higher_better) or
    list of (eval_name, eval_result, is_higher_better):

        y_true : numpy 1-D array of shape = [n_samples]
            The target values.
        y_pred : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The predicted values.
            In case of custom ``objective``, predicted values are returned before any transformation,
            e.g. they are raw margin instead of probability of positive class for binary task in this case.
        weight : numpy 1-D array of shape = [n_samples]
            The weight of samples. Weights should be non-negative.
        group : numpy 1-D array
            Group/query data.
            Only used in the learning-to-rank task.
            sum(group) = n_samples.
            For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
            where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
        eval_name : str
            The name of evaluation function (without whitespace).
        eval_result : float
            The eval result.
        is_higher_better : bool
            Is eval result higher better, e.g. AUC is ``is_higher_better``.
"""

_lgbmmodel_doc_predict = """
    {description}

    Parameters
    ----------
    X : {X_shape}
        Input features matrix.
    raw_score : bool, optional (default=False)
        Whether to predict raw scores.
    start_iteration : int, optional (default=0)
        Start index of the iteration to predict.
        If <= 0, starts from the first iteration.
    num_iteration : int or None, optional (default=None)
        Total number of iterations used in the prediction.
        If None, if the best iteration exists and start_iteration <= 0, the best iteration is used;
        otherwise, all iterations from ``start_iteration`` are used (no limits).
        If <= 0, all iterations from ``start_iteration`` are used (no limits).
    pred_leaf : bool, optional (default=False)
        Whether to predict leaf index.
    pred_contrib : bool, optional (default=False)
        Whether to predict feature contributions.

        .. note::

            If you want to get more explanations for your model's predictions using SHAP values,
            like SHAP interaction values,
            you can install the shap package (https://github.com/slundberg/shap).
            Note that unlike the shap package, with ``pred_contrib`` we return a matrix with an extra
            column, where the last column is the expected value.

    validate_features : bool, optional (default=False)
        If True, ensure that the features used to predict match the ones used to train.
        Used only if data is pandas DataFrame.
    **kwargs
        Other parameters for the prediction.

    Returns
    -------
    {output_name} : {predicted_result_shape}
        The predicted values.
    X_leaves : {X_leaves_shape}
        If ``pred_leaf=True``, the predicted leaf of every tree for each sample.
    X_SHAP_values : {X_SHAP_values_shape}
        If ``pred_contrib=True``, the feature contributions for each sample.
    """


def _extract_evaluation_meta_data(
    *,
    collection: Optional[Union[Dict[Any, Any], List[Any]]],
    name: str,
    i: int,
) -> Optional[Any]:
    """Try to extract the ith element of one of the ``eval_*`` inputs."""
    if collection is None:
        return None
    elif isinstance(collection, list):
        # It's possible, for example, to pass 3 eval sets through `eval_set`,
        # but only 1 init_score through `eval_init_score`.
        #
        # This if-else accounts for that possibility.
        if len(collection) > i:
            return collection[i]
        else:
            return None
    elif isinstance(collection, dict):
        return collection.get(i, None)
    else:
        raise TypeError(f"{name} should be dict or list")
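
# Illustrative behavior sketch (hypothetical values, not executed at import time):
#
#     _extract_evaluation_meta_data(collection=[w0, w1], name="eval_sample_weight", i=0)  # -> w0
#     _extract_evaluation_meta_data(collection=[w0], name="eval_sample_weight", i=2)      # -> None (list shorter than i + 1)
#     _extract_evaluation_meta_data(collection={1: w1}, name="eval_init_score", i=1)      # -> w1 (dict keyed by eval-set position)
#     _extract_evaluation_meta_data(collection={1: w1}, name="eval_init_score", i=0)      # -> None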


class LGBMModel(_LGBMModelBase):
    """Implementation of the scikit-learn API for LightGBM."""

    def __init__(
        self,
        boosting_type: str = "gbdt",
        num_leaves: int = 31,
        max_depth: int = -1,
        learning_rate: float = 0.1,
        n_estimators: int = 100,
        subsample_for_bin: int = 200000,
        objective: Optional[Union[str, _LGBM_ScikitCustomObjectiveFunction]] = None,
        class_weight: Optional[Union[Dict, str]] = None,
        min_split_gain: float = 0.0,
        min_child_weight: float = 1e-3,
        min_child_samples: int = 20,
        subsample: float = 1.0,
        subsample_freq: int = 0,
        colsample_bytree: float = 1.0,
        reg_alpha: float = 0.0,
        reg_lambda: float = 0.0,
        random_state: Optional[Union[int, np.random.RandomState, np.random.Generator]] = None,
        n_jobs: Optional[int] = None,
        importance_type: str = "split",
        **kwargs: Any,
    ):
        r"""Construct a gradient boosting model.

        Parameters
        ----------
        boosting_type : str, optional (default='gbdt')
            'gbdt', traditional Gradient Boosting Decision Tree.
            'dart', Dropouts meet Multiple Additive Regression Trees.
            'rf', Random Forest.
        num_leaves : int, optional (default=31)
            Maximum tree leaves for base learners.
        max_depth : int, optional (default=-1)
            Maximum tree depth for base learners, <=0 means no limit.
            If setting this to a positive value, consider also changing ``num_leaves`` to ``<= 2^max_depth``.
        learning_rate : float, optional (default=0.1)
            Boosting learning rate.
            You can use ``callbacks`` parameter of ``fit`` method to shrink/adapt learning rate
            in training using ``reset_parameter`` callback.
            Note, that this will ignore the ``learning_rate`` argument in training.
        n_estimators : int, optional (default=100)
            Number of boosted trees to fit.
        subsample_for_bin : int, optional (default=200000)
            Number of samples for constructing bins.
        objective : str, callable or None, optional (default=None)
            Specify the learning task and the corresponding learning objective or
            a custom objective function to be used (see note below).
            Default: 'regression' for LGBMRegressor, 'binary' or 'multiclass' for LGBMClassifier, 'lambdarank' for LGBMRanker.
        class_weight : dict, 'balanced' or None, optional (default=None)
            Weights associated with classes in the form ``{class_label: weight}``.
            Use this parameter only for multi-class classification task;
            for binary classification task you may use ``is_unbalance`` or ``scale_pos_weight`` parameters.
            Note, that the usage of all these parameters will result in poor estimates of the individual class probabilities.
            You may want to consider performing probability calibration
            (https://scikit-learn.org/stable/modules/calibration.html) of your model.
            The 'balanced' mode uses the values of y to automatically adjust weights
            inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))``.
            If None, all classes are supposed to have weight one.
            Note, that these weights will be multiplied with ``sample_weight`` (passed through the ``fit`` method)
            if ``sample_weight`` is specified.
        min_split_gain : float, optional (default=0.)
            Minimum loss reduction required to make a further partition on a leaf node of the tree.
        min_child_weight : float, optional (default=1e-3)
            Minimum sum of instance weight (Hessian) needed in a child (leaf).
        min_child_samples : int, optional (default=20)
            Minimum number of data needed in a child (leaf).
        subsample : float, optional (default=1.)
            Subsample ratio of the training instance.
        subsample_freq : int, optional (default=0)
            Frequency of subsample, <=0 means subsampling is disabled.
        colsample_bytree : float, optional (default=1.)
            Subsample ratio of columns when constructing each tree.
        reg_alpha : float, optional (default=0.)
            L1 regularization term on weights.
        reg_lambda : float, optional (default=0.)
            L2 regularization term on weights.
        random_state : int, RandomState object or None, optional (default=None)
            Random number seed.
            If int, this number is used to seed the C++ code.
            If RandomState or Generator object (numpy), a random integer is picked based on its state to seed the C++ code.
            If None, default seeds in C++ code are used.
        n_jobs : int or None, optional (default=None)
            Number of parallel threads to use for training (can be changed at prediction time by
            passing it as an extra keyword argument).

            For better performance, it is recommended to set this to the number of physical cores
            in the CPU.

            Negative integers are interpreted as following joblib's formula (n_cpus + 1 + n_jobs), just like
            scikit-learn (so e.g. -1 means using all threads). A value of zero corresponds to the default number of
            threads configured for OpenMP in the system. A value of ``None`` (the default) corresponds
            to using the number of physical cores in the system (its correct detection requires
            either the ``joblib`` or the ``psutil`` util libraries to be installed).

            .. versionchanged:: 4.0.0

        importance_type : str, optional (default='split')
            The type of feature importance to be filled into ``feature_importances_``.
            If 'split', result contains numbers of times the feature is used in a model.
            If 'gain', result contains total gains of splits which use the feature.
        **kwargs
            Other parameters for the model.
            Check http://lightgbm.readthedocs.io/en/latest/Parameters.html for more parameters.

            .. warning::

                \*\*kwargs is not supported in sklearn, it may cause unexpected issues.

        Note
        ----
        A custom objective function can be provided for the ``objective`` parameter.
        In this case, it should have the signature
        ``objective(y_true, y_pred) -> grad, hess``,
        ``objective(y_true, y_pred, weight) -> grad, hess``
        or ``objective(y_true, y_pred, weight, group) -> grad, hess``:

            y_true : numpy 1-D array of shape = [n_samples]
                The target values.
            y_pred : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                The predicted values.
                Predicted values are returned before any transformation,
                e.g. they are raw margin instead of probability of positive class for binary task.
            weight : numpy 1-D array of shape = [n_samples]
                The weight of samples. Weights should be non-negative.
            group : numpy 1-D array
                Group/query data.
                Only used in the learning-to-rank task.
                sum(group) = n_samples.
                For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
                where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
            grad : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                The value of the first order derivative (gradient) of the loss
                with respect to the elements of y_pred for each sample point.
            hess : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                The value of the second order derivative (Hessian) of the loss
                with respect to the elements of y_pred for each sample point.

        For multi-class task, y_pred is a numpy 2-D array of shape = [n_samples, n_classes],
        and grad and hess should be returned in the same format.
        """
        if not SKLEARN_INSTALLED:
            raise LightGBMError(
                "scikit-learn is required for lightgbm.sklearn. "
                "You must install scikit-learn and restart your session to use this module."
            )

        self.boosting_type = boosting_type
        self.objective = objective
        self.num_leaves = num_leaves
        self.max_depth = max_depth
        self.learning_rate = learning_rate
        self.n_estimators = n_estimators
        self.subsample_for_bin = subsample_for_bin
        self.min_split_gain = min_split_gain
        self.min_child_weight = min_child_weight
        self.min_child_samples = min_child_samples
        self.subsample = subsample
        self.subsample_freq = subsample_freq
        self.colsample_bytree = colsample_bytree
        self.reg_alpha = reg_alpha
        self.reg_lambda = reg_lambda
        self.random_state = random_state
        self.n_jobs = n_jobs
        self.importance_type = importance_type
        self._Booster: Optional[Booster] = None
        self._evals_result: _EvalResultDict = {}
        self._best_score: _LGBM_BoosterBestScoreType = {}
        self._best_iteration: int = -1
        self._other_params: Dict[str, Any] = {}
        self._objective = objective
        self.class_weight = class_weight
        self._class_weight: Optional[Union[Dict, str]] = None
        self._class_map: Optional[Dict[int, int]] = None
        self._n_features: int = -1
        self._n_features_in: int = -1
        self._classes: Optional[np.ndarray] = None
        self._n_classes: int = -1
        self.set_params(**kwargs)

    # scikit-learn 1.6 introduced an __sklearn_tags__() method intended to replace _more_tags().
    # _more_tags() can be removed whenever lightgbm's minimum supported scikit-learn version
    # is >=1.6.
    # ref: https://github.com/microsoft/LightGBM/pull/6651
    def _more_tags(self) -> Dict[str, Any]:
        check_sample_weight_str = (
            "In LightGBM, setting a sample's weight to 0 can produce a different result than omitting the sample. "
            "Such samples intentionally still affect count-based measures like 'min_data_in_leaf' "
            "(https://github.com/microsoft/LightGBM/issues/5626#issuecomment-1712706678) and the estimated distribution "
            "of features for Dataset construction (see https://github.com/microsoft/LightGBM/issues/5553)."
        )
        # "check_sample_weight_equivalence" can be removed when lightgbm's
        # minimum supported scikit-learn version is at least 1.6
        # ref: https://github.com/scikit-learn/scikit-learn/pull/30137
        return {
            "allow_nan": True,
            "X_types": ["2darray", "sparse", "1dlabels"],
            "_xfail_checks": {
                "check_no_attributes_set_in_init": "scikit-learn incorrectly asserts that private attributes "
                "cannot be set in __init__: "
                "(see https://github.com/microsoft/LightGBM/issues/2628)",
                "check_sample_weight_equivalence": check_sample_weight_str,
                "check_sample_weight_equivalence_on_dense_data": check_sample_weight_str,
                "check_sample_weight_equivalence_on_sparse_data": check_sample_weight_str,
            },
        }

    @staticmethod
    def _update_sklearn_tags_from_dict(
        *,
        tags: "_sklearn_Tags",
        tags_dict: Dict[str, Any],
    ) -> "_sklearn_Tags":
        """Update ``sklearn.utils.Tags`` inherited from ``scikit-learn`` base classes.

        ``scikit-learn`` 1.6 introduced a dataclass-based interface for estimator tags.
        ref: https://github.com/scikit-learn/scikit-learn/pull/29677

        This method handles updating that instance based on the value in ``self._more_tags()``.
        """
        tags.input_tags.allow_nan = tags_dict["allow_nan"]
        tags.input_tags.sparse = "sparse" in tags_dict["X_types"]
        tags.target_tags.one_d_labels = "1dlabels" in tags_dict["X_types"]
        return tags

    def __sklearn_tags__(self) -> Optional["_sklearn_Tags"]:
        # _LGBMModelBase.__sklearn_tags__() cannot be called unconditionally,
        # because that method isn't defined for scikit-learn<1.6
        if not hasattr(_LGBMModelBase, "__sklearn_tags__"):
            err_msg = (
                "__sklearn_tags__() should not be called when using scikit-learn<1.6. "
                f"Detected version: {_sklearn_version}"
            )
            raise AttributeError(err_msg)

        # take whatever tags are provided by BaseEstimator, then modify
        # them with LightGBM-specific values
        return self._update_sklearn_tags_from_dict(
            tags=super().__sklearn_tags__(),
            tags_dict=self._more_tags(),
        )

    def __sklearn_is_fitted__(self) -> bool:
        return getattr(self, "fitted_", False)

    def get_params(self, deep: bool = True) -> Dict[str, Any]:
        """Get parameters for this estimator.

        Parameters
        ----------
        deep : bool, optional (default=True)
            If True, will return the parameters for this estimator and
            contained subobjects that are estimators.

        Returns
        -------
        params : dict
            Parameter names mapped to their values.
        """
        params = super().get_params(deep=deep)
        params.update(self._other_params)
        return params

    def set_params(self, **params: Any) -> "LGBMModel":
        """Set the parameters of this estimator.

        Parameters
        ----------
        **params
            Parameter names with their new values.

        Returns
        -------
        self : object
            Returns self.
        """
        for key, value in params.items():
            setattr(self, key, value)
            if hasattr(self, f"_{key}"):
                setattr(self, f"_{key}", value)
            self._other_params[key] = value
        return self
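
    # Behavior sketch (hypothetical call, for illustration): because LGBMModel accepts
    # arbitrary LightGBM parameters through **kwargs, ``set_params`` records every key
    # in ``self._other_params`` so that ``get_params`` can report it back, e.g.
    #
    #     model.set_params(learning_rate=0.05, bagging_fraction=0.8)
    #
    # updates the ``learning_rate`` attribute and stores ``bagging_fraction`` (a native
    # LightGBM parameter with no dedicated constructor argument) in ``_other_params``.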

    def _process_params(self, stage: str) -> Dict[str, Any]:
        """Process the parameters of this estimator based on its type, parameter aliases, etc.

        Parameters
        ----------
        stage : str
            Name of the stage (can be ``fit`` or ``predict``) this method is called from.

        Returns
        -------
        processed_params : dict
            Processed parameter names mapped to their values.
        """
        assert stage in {"fit", "predict"}
        params = self.get_params()

        params.pop("objective", None)
        for alias in _ConfigAliases.get("objective"):
            if alias in params:
                obj = params.pop(alias)
                _log_warning(f"Found '{alias}' in params. Will use it instead of 'objective' argument")
                if stage == "fit":
                    self._objective = obj
        if stage == "fit":
            if self._objective is None:
                if isinstance(self, LGBMRegressor):
                    self._objective = "regression"
                elif isinstance(self, LGBMClassifier):
                    if self._n_classes > 2:
                        self._objective = "multiclass"
                    else:
                        self._objective = "binary"
                elif isinstance(self, LGBMRanker):
                    self._objective = "lambdarank"
                else:
                    raise ValueError("Unknown LGBMModel type.")
        if callable(self._objective):
            if stage == "fit":
                params["objective"] = _ObjectiveFunctionWrapper(self._objective)
            else:
                params["objective"] = "None"
        else:
            params["objective"] = self._objective

        params.pop("importance_type", None)
        params.pop("n_estimators", None)
        params.pop("class_weight", None)

        if isinstance(params["random_state"], np.random.RandomState):
            params["random_state"] = params["random_state"].randint(np.iinfo(np.int32).max)
        elif isinstance(params["random_state"], np.random.Generator):
            params["random_state"] = int(params["random_state"].integers(np.iinfo(np.int32).max))
        if self._n_classes > 2:
            for alias in _ConfigAliases.get("num_class"):
                params.pop(alias, None)
            params["num_class"] = self._n_classes
        if hasattr(self, "_eval_at"):
            eval_at = self._eval_at
            for alias in _ConfigAliases.get("eval_at"):
                if alias in params:
                    _log_warning(f"Found '{alias}' in params. Will use it instead of 'eval_at' argument")
                    eval_at = params.pop(alias)
            params["eval_at"] = eval_at

        # register default metric for consistency with callable eval_metric case
        original_metric = self._objective if isinstance(self._objective, str) else None
        if original_metric is None:
            # try to deduce from class instance
            if isinstance(self, LGBMRegressor):
                original_metric = "l2"
            elif isinstance(self, LGBMClassifier):
                original_metric = "multi_logloss" if self._n_classes > 2 else "binary_logloss"
            elif isinstance(self, LGBMRanker):
                original_metric = "ndcg"

        # overwrite default metric by explicitly set metric
        params = _choose_param_value("metric", params, original_metric)

        # use joblib conventions for negative n_jobs, just like scikit-learn
        # at predict time, this is handled later due to the order of parameter updates
        if stage == "fit":
            params = _choose_param_value("num_threads", params, self.n_jobs)
            params["num_threads"] = self._process_n_jobs(params["num_threads"])

        return params
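
    # Resolution sketch (hypothetical, for illustration): for an LGBMClassifier
    # with 3 classes and no explicit ``objective`` or ``metric``,
    # ``_process_params(stage="fit")`` would return params containing
    # ``objective="multiclass"``, ``metric="multi_logloss"`` (via
    # ``_choose_param_value``) and ``num_class=3``, while dropping sklearn-only
    # keys such as ``n_estimators``, ``class_weight`` and ``importance_type``.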

    def _process_n_jobs(self, n_jobs: Optional[int]) -> int:
        """Convert special values of n_jobs to their actual values according to the formulas that apply.

        Parameters
        ----------
        n_jobs : int or None
            The original value of n_jobs, potentially having special values such as 'None' or
            negative integers.

        Returns
        -------
        n_jobs : int
            The value of n_jobs with special values converted to actual number of threads.
        """
        if n_jobs is None:
            n_jobs = _LGBMCpuCount(only_physical_cores=True)
        elif n_jobs < 0:
            n_jobs = max(_LGBMCpuCount(only_physical_cores=False) + 1 + n_jobs, 1)
        return n_jobs
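
    # Worked examples of the joblib-style conversion above (assuming a machine with
    # 8 logical and 4 physical cores):
    #
    #     self._process_n_jobs(None)  # -> 4  (physical core count)
    #     self._process_n_jobs(-1)    # -> 8  (8 + 1 + -1, i.e. all logical cores)
    #     self._process_n_jobs(-2)    # -> 7
    #     self._process_n_jobs(0)     # -> 0  (left for OpenMP to interpret as its default)
    #     self._process_n_jobs(3)     # -> 3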

    def fit(
        self,
        X: _LGBM_ScikitMatrixLike,
        y: _LGBM_LabelType,
        sample_weight: Optional[_LGBM_WeightType] = None,
        init_score: Optional[_LGBM_InitScoreType] = None,
        group: Optional[_LGBM_GroupType] = None,
        eval_set: Optional[List[_LGBM_ScikitValidSet]] = None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight: Optional[List[_LGBM_WeightType]] = None,
        eval_class_weight: Optional[List[float]] = None,
        eval_init_score: Optional[List[_LGBM_InitScoreType]] = None,
        eval_group: Optional[List[_LGBM_GroupType]] = None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        feature_name: _LGBM_FeatureNameConfiguration = "auto",
        categorical_feature: _LGBM_CategoricalFeatureConfiguration = "auto",
        callbacks: Optional[List[Callable]] = None,
        init_model: Optional[Union[str, Path, Booster, "LGBMModel"]] = None,
    ) -> "LGBMModel":
        """Docstring is set after definition, using a template."""
        params = self._process_params(stage="fit")

        # Do not modify original args in fit function
        # Refer to https://github.com/microsoft/LightGBM/pull/2619
        eval_metric_list: List[Union[str, _LGBM_ScikitCustomEvalFunction]]
        if eval_metric is None:
            eval_metric_list = []
        elif isinstance(eval_metric, list):
            eval_metric_list = copy.deepcopy(eval_metric)
        else:
            eval_metric_list = [copy.deepcopy(eval_metric)]

        # Separate built-in from callable evaluation metrics
        eval_metrics_callable = [_EvalFunctionWrapper(f) for f in eval_metric_list if callable(f)]
        eval_metrics_builtin = [m for m in eval_metric_list if isinstance(m, str)]

        # concatenate metric from params (or default if not provided in params) and eval_metric
        params["metric"] = [params["metric"]] if isinstance(params["metric"], (str, type(None))) else params["metric"]
        params["metric"] = [e for e in eval_metrics_builtin if e not in params["metric"]] + params["metric"]
        params["metric"] = [metric for metric in params["metric"] if metric is not None]

        if not isinstance(X, (pd_DataFrame, dt_DataTable)):
            _X, _y = _LGBMValidateData(
                self,
                X,
                y,
                reset=True,
                # allow any input type (this validation is done further down, in lgb.Dataset())
                accept_sparse=True,
                # do not raise an error if Inf or NaN values are found (LightGBM handles these internally)
                ensure_all_finite=False,
                # raise an error on 0-row and 1-row inputs
                ensure_min_samples=2,
            )
            if sample_weight is not None:
                sample_weight = _LGBMCheckSampleWeight(sample_weight, _X)
        else:
            _X, _y = X, y

            # for other data types, setting n_features_in_ is handled by _LGBMValidateData() in the branch above
            self.n_features_in_ = _X.shape[1]

        if self._class_weight is None:
            self._class_weight = self.class_weight
        if self._class_weight is not None:
            class_sample_weight = _LGBMComputeSampleWeight(self._class_weight, y)
            if sample_weight is None or len(sample_weight) == 0:
                sample_weight = class_sample_weight
            else:
                sample_weight = np.multiply(sample_weight, class_sample_weight)

        train_set = Dataset(
            data=_X,
            label=_y,
            weight=sample_weight,
            group=group,
            init_score=init_score,
            categorical_feature=categorical_feature,
            feature_name=feature_name,
            params=params,
        )

        valid_sets: List[Dataset] = []
        if eval_set is not None:
            if isinstance(eval_set, tuple):
                eval_set = [eval_set]
            for i, valid_data in enumerate(eval_set):
                # reduce cost for prediction training data
                if valid_data[0] is X and valid_data[1] is y:
                    valid_set = train_set
                else:
                    valid_weight = _extract_evaluation_meta_data(
                        collection=eval_sample_weight,
                        name="eval_sample_weight",
                        i=i,
                    )
                    valid_class_weight = _extract_evaluation_meta_data(
                        collection=eval_class_weight,
                        name="eval_class_weight",
                        i=i,
                    )
                    if valid_class_weight is not None:
                        if isinstance(valid_class_weight, dict) and self._class_map is not None:
                            valid_class_weight = {self._class_map[k]: v for k, v in valid_class_weight.items()}
                        valid_class_sample_weight = _LGBMComputeSampleWeight(valid_class_weight, valid_data[1])
                        if valid_weight is None or len(valid_weight) == 0:
                            valid_weight = valid_class_sample_weight
                        else:
                            valid_weight = np.multiply(valid_weight, valid_class_sample_weight)
                    valid_init_score = _extract_evaluation_meta_data(
                        collection=eval_init_score,
                        name="eval_init_score",
                        i=i,
                    )
                    valid_group = _extract_evaluation_meta_data(
                        collection=eval_group,
                        name="eval_group",
                        i=i,
                    )
                    valid_set = Dataset(
                        data=valid_data[0],
                        label=valid_data[1],
                        weight=valid_weight,
                        group=valid_group,
                        init_score=valid_init_score,
                        categorical_feature="auto",
                        params=params,
                    )

                valid_sets.append(valid_set)

        if isinstance(init_model, LGBMModel):
            init_model = init_model.booster_

        if callbacks is None:
            callbacks = []
        else:
            callbacks = copy.copy(callbacks)  # don't use deepcopy here to allow non-serializable objects

        evals_result: _EvalResultDict = {}
        callbacks.append(record_evaluation(evals_result))

        self._Booster = train(
            params=params,
            train_set=train_set,
            num_boost_round=self.n_estimators,
            valid_sets=valid_sets,
            valid_names=eval_names,
            feval=eval_metrics_callable,  # type: ignore[arg-type]
            init_model=init_model,
            callbacks=callbacks,
        )

        # This populates the property self.n_features_, the number of features in the fitted model,
        # and so should only be set after fitting.
        #
        # The related property self._n_features_in, which populates self.n_features_in_,
        # is set BEFORE fitting.
        self._n_features = self._Booster.num_feature()

        self._evals_result = evals_result
        self._best_iteration = self._Booster.best_iteration
        self._best_score = self._Booster.best_score

        self.fitted_ = True

        # free dataset
        self._Booster.free_dataset()
        del train_set, valid_sets
        return self

    fit.__doc__ = (
        _lgbmmodel_doc_fit.format(
            X_shape="numpy array, pandas DataFrame, H2O DataTable's Frame (deprecated), scipy.sparse, list of lists of int or float of shape = [n_samples, n_features]",
            y_shape="numpy array, pandas DataFrame, pandas Series, list of int or float of shape = [n_samples]",
            sample_weight_shape="numpy array, pandas Series, list of int or float of shape = [n_samples] or None, optional (default=None)",
            init_score_shape="numpy array, pandas DataFrame, pandas Series, list of int or float of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task) or shape = [n_samples, n_classes] (for multi-class task) or None, optional (default=None)",
            group_shape="numpy array, pandas Series, list of int or float, or None, optional (default=None)",
            eval_sample_weight_shape="list of array (same types as ``sample_weight`` supports), or None, optional (default=None)",
            eval_init_score_shape="list of array (same types as ``init_score`` supports), or None, optional (default=None)",
            eval_group_shape="list of array (same types as ``group`` supports), or None, optional (default=None)",
        )
        + "\n\n"
        + _lgbmmodel_doc_custom_eval_note
    )
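
    # Usage sketch (illustrative, with hypothetical data): ``fit`` accepts validation
    # sets and per-set metadata in parallel lists, e.g. for a ranking task
    #
    #     model = LGBMRanker(n_estimators=50)
    #     model.fit(
    #         X_train, y_train,
    #         group=[10, 20, 40, 10, 10, 10],  # 6 query groups covering 100 rows
    #         eval_set=[(X_valid, y_valid)],
    #         eval_group=[[25, 25]],           # one group array per eval set
    #         eval_metric="ndcg",
    #     )
    #
    # Non-callable metrics are merged into ``params["metric"]``; callables are
    # wrapped in ``_EvalFunctionWrapper`` and passed to ``train()`` as ``feval``.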

    def predict(
        self,
        X: _LGBM_ScikitMatrixLike,
        raw_score: bool = False,
        start_iteration: int = 0,
        num_iteration: Optional[int] = None,
        pred_leaf: bool = False,
        pred_contrib: bool = False,
        validate_features: bool = False,
        **kwargs: Any,
    ):
        """Docstring is set after definition, using a template."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("Estimator not fitted, call fit before exploiting the model.")
        if not isinstance(X, (pd_DataFrame, dt_DataTable)):
            X = _LGBMValidateData(
                self,
                X,
                # 'y' being omitted = run scikit-learn's check_array() instead of check_X_y()
                #
                # Prevent scikit-learn from deleting or modifying attributes like 'feature_names_in_' and 'n_features_in_'.
                # These shouldn't be changed at predict() time.
                reset=False,
                # allow any input type (this validation is done further down, in lgb.Dataset())
                accept_sparse=True,
                # do not raise an error if Inf or NaN values are found (LightGBM handles these internally)
                ensure_all_finite=False,
                # raise an error on 0-row inputs
                ensure_min_samples=1,
            )
        # retrieve original params that possibly can be used in both training and prediction
        # and then overwrite them (considering aliases) with params that were passed directly in prediction
        predict_params = self._process_params(stage="predict")
        for alias in _ConfigAliases.get_by_alias(
            "data",
            "X",
            "raw_score",
            "start_iteration",
            "num_iteration",
            "pred_leaf",
            "pred_contrib",
            *kwargs.keys(),
        ):
            predict_params.pop(alias, None)
        predict_params.update(kwargs)

        # number of threads can have values with special meaning which is only applied
        # in the scikit-learn interface, these should not reach the c++ side as-is
        predict_params = _choose_param_value("num_threads", predict_params, self.n_jobs)
        predict_params["num_threads"] = self._process_n_jobs(predict_params["num_threads"])

        return self._Booster.predict(  # type: ignore[union-attr]
            X,
            raw_score=raw_score,
            start_iteration=start_iteration,
            num_iteration=num_iteration,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            validate_features=validate_features,
            **predict_params,
        )
wxchan's avatar
wxchan committed
1125

1126
1127
    predict.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted value for each sample.",
        X_shape="numpy array, pandas DataFrame, H2O DataTable's Frame (deprecated), scipy.sparse, list of lists of int or float of shape = [n_samples, n_features]",
        output_name="predicted_result",
        predicted_result_shape="array-like of shape = [n_samples] or shape = [n_samples, n_classes]",
        X_leaves_shape="array-like of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]",
        X_SHAP_values_shape="array-like of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes] or list with n_classes length of such objects",
    )
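
    # Illustrative usage sketch (comment only, not executed). Names like ``X_train``,
    # ``y_train``, and ``X_test`` are assumed to be caller-provided array-likes:
    #
    #     model = LGBMRegressor(n_estimators=100).fit(X_train, y_train)
    #     preds = model.predict(X_test)                          # use all trees
    #     preds_early = model.predict(X_test, num_iteration=50)  # only the first 50 iterations
    #     contribs = model.predict(X_test, pred_contrib=True)    # SHAP-style feature contributions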

    @property
    def n_features_(self) -> int:
        """:obj:`int`: The number of features of fitted model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No n_features found. Need to call fit beforehand.")
        return self._n_features

    @property
    def n_features_in_(self) -> int:
        """:obj:`int`: The number of features of fitted model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No n_features_in found. Need to call fit beforehand.")
        return self._n_features_in

    @n_features_in_.setter
    def n_features_in_(self, value: int) -> None:
        """Set number of features found in passed-in dataset.

        Starting with ``scikit-learn`` 1.6, ``scikit-learn`` expects to be able to directly
        set this property in functions like ``validate_data()``.

        .. note::

            Do not call ``estimator.n_features_in_ = some_int`` or anything else that invokes
            this method. It is only here for compatibility with ``scikit-learn`` validation
            functions used internally in ``lightgbm``.
        """
        self._n_features_in = value

    @property
    def best_score_(self) -> _LGBM_BoosterBestScoreType:
        """:obj:`dict`: The best score of fitted model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No best_score found. Need to call fit beforehand.")
        return self._best_score

    @property
    def best_iteration_(self) -> int:
        """:obj:`int`: The best iteration of fitted model if ``early_stopping()`` callback has been specified."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError(
                "No best_iteration found. Need to call fit with early_stopping callback beforehand."
            )
        return self._best_iteration

    @property
    def objective_(self) -> Union[str, _LGBM_ScikitCustomObjectiveFunction]:
        """:obj:`str` or :obj:`callable`: The concrete objective used while fitting this model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No objective found. Need to call fit beforehand.")
        return self._objective  # type: ignore[return-value]

    @property
    def n_estimators_(self) -> int:
        """:obj:`int`: True number of boosting iterations performed.

        This might be less than parameter ``n_estimators`` if early stopping was enabled or
        if boosting stopped early due to limits on complexity like ``min_gain_to_split``.

        .. versionadded:: 4.0.0
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No n_estimators found. Need to call fit beforehand.")
        return self._Booster.current_iteration()  # type: ignore

    @property
    def n_iter_(self) -> int:
        """:obj:`int`: True number of boosting iterations performed.

        This might be less than parameter ``n_estimators`` if early stopping was enabled or
        if boosting stopped early due to limits on complexity like ``min_gain_to_split``.

        .. versionadded:: 4.0.0
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No n_iter found. Need to call fit beforehand.")
        return self._Booster.current_iteration()  # type: ignore

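    # Illustrative sketch (comment only, not executed; assumes pre-split ``X_train``,
    # ``y_train``, ``X_valid``, ``y_valid`` arrays). With the ``early_stopping()``
    # callback, ``n_iter_`` / ``n_estimators_`` report how many iterations were
    # actually performed, which may be fewer than ``n_estimators``:
    #
    #     import lightgbm as lgb
    #
    #     model = lgb.LGBMRegressor(n_estimators=1000)
    #     model.fit(X_train, y_train, eval_set=[(X_valid, y_valid)],
    #               callbacks=[lgb.early_stopping(stopping_rounds=10)])
    #     print(model.n_iter_, model.best_iteration_)
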
    @property
    def booster_(self) -> Booster:
        """Booster: The underlying Booster of this model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No booster found. Need to call fit beforehand.")
        return self._Booster  # type: ignore[return-value]

    @property
    def evals_result_(self) -> _EvalResultDict:
        """:obj:`dict`: The evaluation results if validation sets have been specified."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No results found. Need to call fit with eval_set beforehand.")
        return self._evals_result

    @property
    def feature_importances_(self) -> np.ndarray:
        """:obj:`array` of shape = [n_features]: The feature importances (the higher, the more important).

        .. note::

            ``importance_type`` attribute is passed to the function
            to configure the type of importance values to be extracted.
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No feature_importances found. Need to call fit beforehand.")
        return self._Booster.feature_importance(importance_type=self.importance_type)  # type: ignore[union-attr]
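
    # Illustrative sketch (comment only, not executed; ``X_train``/``y_train`` assumed):
    # pairing importances with feature names, with ``importance_type`` controlling
    # whether split counts ("split") or total gain ("gain") is reported:
    #
    #     model = LGBMClassifier(importance_type="gain").fit(X_train, y_train)
    #     for name, score in zip(model.feature_name_, model.feature_importances_):
    #         print(name, score)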

    @property
    def feature_name_(self) -> List[str]:
        """:obj:`list` of shape = [n_features]: The names of features.

        .. note::

            If input does not contain feature names, they will be added during fitting in the format ``Column_0``, ``Column_1``, ..., ``Column_N``.
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No feature_name found. Need to call fit beforehand.")
        return self._Booster.feature_name()  # type: ignore[union-attr]

    @property
    def feature_names_in_(self) -> np.ndarray:
        """:obj:`array` of shape = [n_features]: scikit-learn compatible version of ``.feature_name_``.

        .. versionadded:: 4.5.0
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No feature_names_in_ found. Need to call fit beforehand.")
        return np.array(self.feature_name_)

    @feature_names_in_.deleter
    def feature_names_in_(self) -> None:
        """Intercept calls to delete ``feature_names_in_``.

        Some code paths in ``scikit-learn`` try to delete the ``feature_names_in_`` attribute
        on estimators when a new training dataset that doesn't have features is passed.
        LightGBM automatically assigns feature names to such datasets
        (like ``Column_0``, ``Column_1``, etc.) and so does not want that behavior.

        However, that behavior is coupled to ``scikit-learn`` automatically updating
        ``n_features_in_`` in those same code paths, which is necessary for compliance
        with its API (via argument ``reset`` to functions like ``validate_data()`` and
        ``check_array()``).

        .. note::

            Do not call ``del estimator.feature_names_in_`` or anything else that invokes
            this method. It is only here for compatibility with ``scikit-learn`` validation
            functions used internally in ``lightgbm``.
        """
        pass


class LGBMRegressor(_LGBMRegressorBase, LGBMModel):
    """LightGBM regressor."""

    def _more_tags(self) -> Dict[str, Any]:
        # handle the case where RegressorMixin possibly provides _more_tags()
        if callable(getattr(_LGBMRegressorBase, "_more_tags", None)):
            tags = _LGBMRegressorBase._more_tags(self)
        else:
            tags = {}
        # override those with LightGBM-specific preferences
        tags.update(LGBMModel._more_tags(self))
        return tags

    def __sklearn_tags__(self) -> "_sklearn_Tags":
        return super().__sklearn_tags__()

    def fit(  # type: ignore[override]
        self,
        X: _LGBM_ScikitMatrixLike,
        y: _LGBM_LabelType,
        sample_weight: Optional[_LGBM_WeightType] = None,
        init_score: Optional[_LGBM_InitScoreType] = None,
        eval_set: Optional[List[_LGBM_ScikitValidSet]] = None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight: Optional[List[_LGBM_WeightType]] = None,
        eval_init_score: Optional[List[_LGBM_InitScoreType]] = None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        feature_name: _LGBM_FeatureNameConfiguration = "auto",
        categorical_feature: _LGBM_CategoricalFeatureConfiguration = "auto",
        callbacks: Optional[List[Callable]] = None,
        init_model: Optional[Union[str, Path, Booster, LGBMModel]] = None,
    ) -> "LGBMRegressor":
        """Docstring is inherited from the LGBMModel."""
        super().fit(
            X,
            y,
            sample_weight=sample_weight,
            init_score=init_score,
            eval_set=eval_set,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_init_score=eval_init_score,
            eval_metric=eval_metric,
            feature_name=feature_name,
            categorical_feature=categorical_feature,
            callbacks=callbacks,
            init_model=init_model,
        )
        return self

    _base_doc = LGBMModel.fit.__doc__.replace("self : LGBMModel", "self : LGBMRegressor")  # type: ignore
    _base_doc = (
        _base_doc[: _base_doc.find("group :")]  # type: ignore
        + _base_doc[_base_doc.find("eval_set :") :]
    )  # type: ignore
    _base_doc = _base_doc[: _base_doc.find("eval_class_weight :")] + _base_doc[_base_doc.find("eval_init_score :") :]
    fit.__doc__ = _base_doc[: _base_doc.find("eval_group :")] + _base_doc[_base_doc.find("eval_metric :") :]
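
    # Illustrative usage sketch (comment only, not executed; ``X_train``/``y_train``
    # are assumed caller-provided arrays):
    #
    #     from lightgbm import LGBMRegressor
    #
    #     reg = LGBMRegressor(n_estimators=100, learning_rate=0.1)
    #     reg.fit(X_train, y_train)
    #     y_pred = reg.predict(X_train)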


class LGBMClassifier(_LGBMClassifierBase, LGBMModel):
    """LightGBM classifier."""

    def _more_tags(self) -> Dict[str, Any]:
        # handle the case where ClassifierMixin possibly provides _more_tags()
        if callable(getattr(_LGBMClassifierBase, "_more_tags", None)):
            tags = _LGBMClassifierBase._more_tags(self)
        else:
            tags = {}
        # override those with LightGBM-specific preferences
        tags.update(LGBMModel._more_tags(self))
        return tags

    def __sklearn_tags__(self) -> "_sklearn_Tags":
        tags = super().__sklearn_tags__()
        tags.classifier_tags.multi_class = True
        tags.classifier_tags.multi_label = False
        return tags

    def fit(  # type: ignore[override]
        self,
        X: _LGBM_ScikitMatrixLike,
        y: _LGBM_LabelType,
        sample_weight: Optional[_LGBM_WeightType] = None,
        init_score: Optional[_LGBM_InitScoreType] = None,
        eval_set: Optional[List[_LGBM_ScikitValidSet]] = None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight: Optional[List[_LGBM_WeightType]] = None,
        eval_class_weight: Optional[List[float]] = None,
        eval_init_score: Optional[List[_LGBM_InitScoreType]] = None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        feature_name: _LGBM_FeatureNameConfiguration = "auto",
        categorical_feature: _LGBM_CategoricalFeatureConfiguration = "auto",
        callbacks: Optional[List[Callable]] = None,
        init_model: Optional[Union[str, Path, Booster, LGBMModel]] = None,
    ) -> "LGBMClassifier":
        """Docstring is inherited from the LGBMModel."""
        _LGBMAssertAllFinite(y)
        _LGBMCheckClassificationTargets(y)
        self._le = _LGBMLabelEncoder().fit(y)
        _y = self._le.transform(y)
        self._class_map = dict(zip(self._le.classes_, self._le.transform(self._le.classes_)))
        if isinstance(self.class_weight, dict):
            self._class_weight = {self._class_map[k]: v for k, v in self.class_weight.items()}

        self._classes = self._le.classes_
        self._n_classes = len(self._classes)  # type: ignore[arg-type]
        if self.objective is None:
            self._objective = None

        # adjust eval metrics to match whether binary or multiclass
        # classification is being performed
        if not callable(eval_metric):
            if isinstance(eval_metric, list):
                # take a copy, to avoid modifying the input list in place
                # (which can break scikit-learn model-selection tools that re-use parameters)
                eval_metric_list = copy.deepcopy(eval_metric)
            elif isinstance(eval_metric, str):
                eval_metric_list = [eval_metric]
            else:
                eval_metric_list = []
            if self.__is_multiclass:
                for index, metric in enumerate(eval_metric_list):
                    if metric in {"logloss", "binary_logloss"}:
                        eval_metric_list[index] = "multi_logloss"
                    elif metric in {"error", "binary_error"}:
                        eval_metric_list[index] = "multi_error"
            else:
                for index, metric in enumerate(eval_metric_list):
                    if metric in {"logloss", "multi_logloss"}:
                        eval_metric_list[index] = "binary_logloss"
                    elif metric in {"error", "multi_error"}:
                        eval_metric_list[index] = "binary_error"
            eval_metric = eval_metric_list
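            # For example (illustrative): with 3 classes, eval_metric="logloss" above is
            # rewritten to "multi_logloss"; with 2 classes, "multi_error" becomes "binary_error".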

        # do not modify args, as it causes errors in model selection tools
        valid_sets: Optional[List[_LGBM_ScikitValidSet]] = None
        if eval_set is not None:
            if isinstance(eval_set, tuple):
                eval_set = [eval_set]
            valid_sets = []
            for valid_x, valid_y in eval_set:
                if valid_x is X and valid_y is y:
                    valid_sets.append((valid_x, _y))
                else:
                    valid_sets.append((valid_x, self._le.transform(valid_y)))

        super().fit(
            X,
            _y,
            sample_weight=sample_weight,
            init_score=init_score,
            eval_set=valid_sets,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_class_weight=eval_class_weight,
            eval_init_score=eval_init_score,
            eval_metric=eval_metric,
            feature_name=feature_name,
            categorical_feature=categorical_feature,
            callbacks=callbacks,
            init_model=init_model,
        )
        return self

    _base_doc = LGBMModel.fit.__doc__.replace("self : LGBMModel", "self : LGBMClassifier")  # type: ignore
    _base_doc = (
        _base_doc[: _base_doc.find("group :")]  # type: ignore
        + _base_doc[_base_doc.find("eval_set :") :]
    )  # type: ignore
    fit.__doc__ = _base_doc[: _base_doc.find("eval_group :")] + _base_doc[_base_doc.find("eval_metric :") :]
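
    # Illustrative usage sketch (comment only, not executed; ``X_train``, ``y_train``,
    # ``X_valid``, ``y_valid`` are assumed caller-provided arrays):
    #
    #     from lightgbm import LGBMClassifier
    #
    #     clf = LGBMClassifier(n_estimators=100)
    #     clf.fit(X_train, y_train, eval_set=[(X_valid, y_valid)], eval_metric="logloss")
    #     labels = clf.predict(X_valid)       # labels in the original label space
    #     probs = clf.predict_proba(X_valid)  # shape = [n_samples, n_classes]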

    def predict(
        self,
        X: _LGBM_ScikitMatrixLike,
        raw_score: bool = False,
        start_iteration: int = 0,
        num_iteration: Optional[int] = None,
        pred_leaf: bool = False,
        pred_contrib: bool = False,
        validate_features: bool = False,
        **kwargs: Any,
    ):
        """Docstring is inherited from the LGBMModel."""
        result = self.predict_proba(
            X=X,
            raw_score=raw_score,
            start_iteration=start_iteration,
            num_iteration=num_iteration,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            validate_features=validate_features,
            **kwargs,
        )
        if callable(self._objective) or raw_score or pred_leaf or pred_contrib:
            return result
        else:
            class_index = np.argmax(result, axis=1)
            return self._le.inverse_transform(class_index)

    predict.__doc__ = LGBMModel.predict.__doc__

    def predict_proba(
        self,
        X: _LGBM_ScikitMatrixLike,
        raw_score: bool = False,
        start_iteration: int = 0,
        num_iteration: Optional[int] = None,
        pred_leaf: bool = False,
        pred_contrib: bool = False,
        validate_features: bool = False,
        **kwargs: Any,
    ):
        """Docstring is set after definition, using a template."""
        result = super().predict(
            X=X,
            raw_score=raw_score,
            start_iteration=start_iteration,
            num_iteration=num_iteration,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            validate_features=validate_features,
            **kwargs,
        )
        if callable(self._objective) and not (raw_score or pred_leaf or pred_contrib):
            _log_warning(
                "Cannot compute class probabilities or labels "
                "due to the usage of customized objective function.\n"
                "Returning raw scores instead."
            )
            return result
        elif self.__is_multiclass or raw_score or pred_leaf or pred_contrib:  # type: ignore [operator]
            return result
        else:
            # binary classification: the Booster returns only the probability of the
            # positive class, so stack [P(y=0), P(y=1)] column-wise
            return np.vstack((1.0 - result, result)).transpose()

    predict_proba.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted probability for each class for each sample.",
        X_shape="numpy array, pandas DataFrame, H2O DataTable's Frame (deprecated), scipy.sparse, list of lists of int or float of shape = [n_samples, n_features]",
        output_name="predicted_probability",
        predicted_result_shape="array-like of shape = [n_samples] or shape = [n_samples, n_classes]",
        X_leaves_shape="array-like of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]",
        X_SHAP_values_shape="array-like of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes] or list with n_classes length of such objects",
    )

    @property
    def classes_(self) -> np.ndarray:
        """:obj:`array` of shape = [n_classes]: The class label array."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No classes found. Need to call fit beforehand.")
        return self._classes  # type: ignore[return-value]

    @property
    def n_classes_(self) -> int:
        """:obj:`int`: The number of classes."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No classes found. Need to call fit beforehand.")
        return self._n_classes

    @property
    def __is_multiclass(self) -> bool:
        """:obj:`bool`: Whether this classifier is being used for multi-class classification."""
        return self._n_classes > 2 or (isinstance(self._objective, str) and self._objective in _MULTICLASS_OBJECTIVES)


class LGBMRanker(LGBMModel):
    """LightGBM ranker.

    .. warning::

        scikit-learn doesn't support ranking applications yet,
        therefore this class is not really compatible with the sklearn ecosystem.
        Please use this class mainly for training and applying ranking models in common sklearnish way.
    """

    def fit(  # type: ignore[override]
        self,
        X: _LGBM_ScikitMatrixLike,
        y: _LGBM_LabelType,
        sample_weight: Optional[_LGBM_WeightType] = None,
        init_score: Optional[_LGBM_InitScoreType] = None,
        group: Optional[_LGBM_GroupType] = None,
        eval_set: Optional[List[_LGBM_ScikitValidSet]] = None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight: Optional[List[_LGBM_WeightType]] = None,
        eval_init_score: Optional[List[_LGBM_InitScoreType]] = None,
        eval_group: Optional[List[_LGBM_GroupType]] = None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        eval_at: Union[List[int], Tuple[int, ...]] = (1, 2, 3, 4, 5),
        feature_name: _LGBM_FeatureNameConfiguration = "auto",
        categorical_feature: _LGBM_CategoricalFeatureConfiguration = "auto",
        callbacks: Optional[List[Callable]] = None,
        init_model: Optional[Union[str, Path, Booster, LGBMModel]] = None,
    ) -> "LGBMRanker":
        """Docstring is inherited from the LGBMModel."""
        # check group data
        if group is None:
            raise ValueError("Should set group for ranking task")

        if eval_set is not None:
            if eval_group is None:
                raise ValueError("Eval_group cannot be None when eval_set is not None")
            elif len(eval_group) != len(eval_set):
                raise ValueError("Length of eval_group should be equal to length of eval_set")
            elif (
                isinstance(eval_group, dict)
                and any(i not in eval_group or eval_group[i] is None for i in range(len(eval_group)))
                or isinstance(eval_group, list)
                and any(group is None for group in eval_group)
            ):
                raise ValueError(
                    "Should set group for all eval datasets for ranking task; "
                    "if you use dict, the index should start from 0"
                )

        self._eval_at = eval_at
        super().fit(
            X,
            y,
            sample_weight=sample_weight,
            init_score=init_score,
            group=group,
            eval_set=eval_set,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_init_score=eval_init_score,
            eval_group=eval_group,
            eval_metric=eval_metric,
            feature_name=feature_name,
            categorical_feature=categorical_feature,
            callbacks=callbacks,
            init_model=init_model,
        )
        return self

    _base_doc = LGBMModel.fit.__doc__.replace("self : LGBMModel", "self : LGBMRanker")  # type: ignore
    fit.__doc__ = (
        _base_doc[: _base_doc.find("eval_class_weight :")]  # type: ignore
        + _base_doc[_base_doc.find("eval_init_score :") :]
    )  # type: ignore
    _base_doc = fit.__doc__
    _before_feature_name, _feature_name, _after_feature_name = _base_doc.partition("feature_name :")
    fit.__doc__ = f"""{_before_feature_name}eval_at : list or tuple of int, optional (default=(1, 2, 3, 4, 5))
        The evaluation positions of the specified metric.
    {_feature_name}{_after_feature_name}"""
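
    # Illustrative usage sketch (comment only, not executed). Assumes query-grouped
    # data where ``group`` holds the number of rows per query and sums to len(y_train):
    #
    #     from lightgbm import LGBMRanker
    #
    #     ranker = LGBMRanker(n_estimators=100)
    #     ranker.fit(
    #         X_train,
    #         y_train,                        # integer relevance labels
    #         group=[10, 20, 15],             # 3 queries of 10, 20, and 15 documents
    #         eval_set=[(X_valid, y_valid)],
    #         eval_group=[[12, 18]],          # per-validation-set group sizes
    #         eval_at=[1, 3, 5],              # evaluate NDCG at positions 1, 3, and 5
    #     )
    #     scores = ranker.predict(X_valid)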