# coding: utf-8
"""Scikit-learn wrapper interface for LightGBM."""

import copy
from inspect import signature
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union

import numpy as np
import scipy.sparse

from .basic import (
    _MULTICLASS_OBJECTIVES,
    Booster,
    Dataset,
    LightGBMError,
    _choose_param_value,
    _ConfigAliases,
    _LGBM_BoosterBestScoreType,
    _LGBM_CategoricalFeatureConfiguration,
    _LGBM_EvalFunctionResultType,
    _LGBM_FeatureNameConfiguration,
    _LGBM_GroupType,
    _LGBM_InitScoreType,
    _LGBM_LabelType,
    _LGBM_WeightType,
    _log_warning,
)
from .callback import _EvalResultDict, record_evaluation
from .compat import (
    SKLEARN_INSTALLED,
    LGBMNotFittedError,
    _LGBMAssertAllFinite,
    _LGBMCheckClassificationTargets,
    _LGBMCheckSampleWeight,
    _LGBMClassifierBase,
    _LGBMComputeSampleWeight,
    _LGBMCpuCount,
    _LGBMLabelEncoder,
    _LGBMModelBase,
    _LGBMRegressorBase,
    _LGBMValidateData,
    _sklearn_ClassifierTags,
    _sklearn_RegressorTags,
    _sklearn_version,
    dt_DataTable,
    pd_DataFrame,
)
from .engine import train

if TYPE_CHECKING:
    from .compat import _sklearn_Tags


__all__ = [
    "LGBMClassifier",
    "LGBMModel",
    "LGBMRanker",
    "LGBMRegressor",
]

_LGBM_ScikitMatrixLike = Union[
    dt_DataTable,
    List[Union[List[float], List[int]]],
    np.ndarray,
    pd_DataFrame,
    scipy.sparse.spmatrix,
]
_LGBM_ScikitCustomObjectiveFunction = Union[
    # f(labels, preds)
    Callable[
        [Optional[np.ndarray], np.ndarray],
        Tuple[np.ndarray, np.ndarray],
    ],
    # f(labels, preds, weights)
    Callable[
        [Optional[np.ndarray], np.ndarray, Optional[np.ndarray]],
        Tuple[np.ndarray, np.ndarray],
    ],
    # f(labels, preds, weights, group)
    Callable[
        [Optional[np.ndarray], np.ndarray, Optional[np.ndarray], Optional[np.ndarray]],
        Tuple[np.ndarray, np.ndarray],
    ],
]
_LGBM_ScikitCustomEvalFunction = Union[
    # f(labels, preds)
    Callable[
        [Optional[np.ndarray], np.ndarray],
        _LGBM_EvalFunctionResultType,
    ],
    Callable[
        [Optional[np.ndarray], np.ndarray],
        List[_LGBM_EvalFunctionResultType],
    ],
    # f(labels, preds, weights)
    Callable[
        [Optional[np.ndarray], np.ndarray, Optional[np.ndarray]],
        _LGBM_EvalFunctionResultType,
    ],
    Callable[
        [Optional[np.ndarray], np.ndarray, Optional[np.ndarray]],
        List[_LGBM_EvalFunctionResultType],
    ],
    # f(labels, preds, weights, group)
    Callable[
        [Optional[np.ndarray], np.ndarray, Optional[np.ndarray], Optional[np.ndarray]],
        _LGBM_EvalFunctionResultType,
    ],
    Callable[
        [Optional[np.ndarray], np.ndarray, Optional[np.ndarray], Optional[np.ndarray]],
        List[_LGBM_EvalFunctionResultType],
    ],
]
_LGBM_ScikitEvalMetricType = Union[
    str,
    _LGBM_ScikitCustomEvalFunction,
    List[Union[str, _LGBM_ScikitCustomEvalFunction]],
]
_LGBM_ScikitValidSet = Tuple[_LGBM_ScikitMatrixLike, _LGBM_LabelType]


def _get_group_from_constructed_dataset(dataset: Dataset) -> Optional[np.ndarray]:
    group = dataset.get_group()
    error_msg = (
        "Estimators in lightgbm.sklearn should only retrieve query groups from a constructed Dataset. "
        "If you're seeing this message, it's a bug in lightgbm. Please report it at https://github.com/microsoft/LightGBM/issues."
    )
    assert group is None or isinstance(group, np.ndarray), error_msg
    return group


def _get_label_from_constructed_dataset(dataset: Dataset) -> np.ndarray:
    label = dataset.get_label()
    error_msg = (
        "Estimators in lightgbm.sklearn should only retrieve labels from a constructed Dataset. "
        "If you're seeing this message, it's a bug in lightgbm. Please report it at https://github.com/microsoft/LightGBM/issues."
    )
    assert isinstance(label, np.ndarray), error_msg
    return label


def _get_weight_from_constructed_dataset(dataset: Dataset) -> Optional[np.ndarray]:
    weight = dataset.get_weight()
    error_msg = (
        "Estimators in lightgbm.sklearn should only retrieve weights from a constructed Dataset. "
        "If you're seeing this message, it's a bug in lightgbm. Please report it at https://github.com/microsoft/LightGBM/issues."
    )
    assert weight is None or isinstance(weight, np.ndarray), error_msg
    return weight


class _ObjectiveFunctionWrapper:
    """Proxy class for objective function."""

    def __init__(self, func: _LGBM_ScikitCustomObjectiveFunction):
        """Construct a proxy class.

        This class transforms objective function to match objective function with signature ``new_func(preds, dataset)``
        as expected by ``lightgbm.engine.train``.

        Parameters
        ----------
        func : callable
            Expects a callable with following signatures:
            ``func(y_true, y_pred)``,
            ``func(y_true, y_pred, weight)``
            or ``func(y_true, y_pred, weight, group)``
            and returns (grad, hess):

                y_true : numpy 1-D array of shape = [n_samples]
                    The target values.
                y_pred : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                    The predicted values.
                    Predicted values are returned before any transformation,
                    e.g. they are raw margin instead of probability of positive class for binary task.
                weight : numpy 1-D array of shape = [n_samples]
                    The weight of samples. Weights should be non-negative.
                group : numpy 1-D array
                    Group/query data.
                    Only used in the learning-to-rank task.
                    sum(group) = n_samples.
                    For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
                    where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
                grad : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                    The value of the first order derivative (gradient) of the loss
                    with respect to the elements of y_pred for each sample point.
                hess : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                    The value of the second order derivative (Hessian) of the loss
                    with respect to the elements of y_pred for each sample point.

        .. note::

            For multi-class task, y_pred is a numpy 2-D array of shape = [n_samples, n_classes],
            and grad and hess should be returned in the same format.
        """
        self.func = func

    def __call__(
        self,
        preds: np.ndarray,
        dataset: Dataset,
    ) -> Tuple[np.ndarray, np.ndarray]:
        """Call passed function with appropriate arguments.

        Parameters
        ----------
        preds : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The predicted values.
        dataset : Dataset
            The training dataset.

        Returns
        -------
        grad : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The value of the first order derivative (gradient) of the loss
            with respect to the elements of preds for each sample point.
        hess : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The value of the second order derivative (Hessian) of the loss
            with respect to the elements of preds for each sample point.
        """
        labels = _get_label_from_constructed_dataset(dataset)
        argc = len(signature(self.func).parameters)
        if argc == 2:
            grad, hess = self.func(labels, preds)  # type: ignore[call-arg]
            return grad, hess

        weight = _get_weight_from_constructed_dataset(dataset)
        if argc == 3:
            grad, hess = self.func(labels, preds, weight)  # type: ignore[call-arg]
            return grad, hess

        if argc == 4:
            group = _get_group_from_constructed_dataset(dataset)
            return self.func(labels, preds, weight, group)  # type: ignore[call-arg]

        raise TypeError(f"Self-defined objective function should have 2, 3 or 4 arguments, got {argc}")
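
# A minimal usage sketch for the wrapper above: a hand-written least-squares
# objective with the 2-argument signature. ``X`` and ``y`` are placeholder
# training data, not names defined in this module.
#
#     def l2_objective(y_true, y_pred):
#         grad = y_pred - y_true       # d/dy_pred of 0.5 * (y_pred - y_true) ** 2
#         hess = np.ones_like(y_pred)  # second derivative is constant 1
#         return grad, hess
#
#     LGBMRegressor(objective=l2_objective).fit(X, y)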


class _EvalFunctionWrapper:
    """Proxy class for evaluation function."""

    def __init__(self, func: _LGBM_ScikitCustomEvalFunction):
        """Construct a proxy class.

        This class transforms evaluation function to match evaluation function with signature ``new_func(preds, dataset)``
        as expected by ``lightgbm.engine.train``.

        Parameters
        ----------
        func : callable
            Expects a callable with following signatures:
            ``func(y_true, y_pred)``,
            ``func(y_true, y_pred, weight)``
            or ``func(y_true, y_pred, weight, group)``
            and returns (eval_name, eval_result, is_higher_better) or
            list of (eval_name, eval_result, is_higher_better):

                y_true : numpy 1-D array of shape = [n_samples]
                    The target values.
                y_pred : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                    The predicted values.
                    In case of custom ``objective``, predicted values are returned before any transformation,
                    e.g. they are raw margin instead of probability of positive class for binary task in this case.
                weight : numpy 1-D array of shape = [n_samples]
                    The weight of samples. Weights should be non-negative.
                group : numpy 1-D array
                    Group/query data.
                    Only used in the learning-to-rank task.
                    sum(group) = n_samples.
                    For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
                    where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
                eval_name : str
                    The name of evaluation function (without whitespace).
                eval_result : float
                    The eval result.
                is_higher_better : bool
                    Is eval result higher better, e.g. AUC is ``is_higher_better``.
        """
        self.func = func

    def __call__(
        self,
        preds: np.ndarray,
        dataset: Dataset,
    ) -> Union[_LGBM_EvalFunctionResultType, List[_LGBM_EvalFunctionResultType]]:
        """Call passed function with appropriate arguments.

        Parameters
        ----------
        preds : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The predicted values.
        dataset : Dataset
            The training dataset.

        Returns
        -------
        eval_name : str
            The name of evaluation function (without whitespace).
        eval_result : float
            The eval result.
        is_higher_better : bool
            Is eval result higher better, e.g. AUC is ``is_higher_better``.
        """
        labels = _get_label_from_constructed_dataset(dataset)
        argc = len(signature(self.func).parameters)
        if argc == 2:
            return self.func(labels, preds)  # type: ignore[call-arg]

        weight = _get_weight_from_constructed_dataset(dataset)
        if argc == 3:
            return self.func(labels, preds, weight)  # type: ignore[call-arg]

        if argc == 4:
            group = _get_group_from_constructed_dataset(dataset)
            return self.func(labels, preds, weight, group)  # type: ignore[call-arg]

        raise TypeError(f"Self-defined eval function should have 2, 3 or 4 arguments, got {argc}")
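
# A minimal usage sketch for the wrapper above: a hand-written mean absolute
# error metric with the 2-argument signature. ``X``, ``y``, ``X_val`` and
# ``y_val`` are placeholder data, not names defined in this module.
#
#     def mae_metric(y_true, y_pred):
#         # returns (eval_name, eval_result, is_higher_better)
#         return "custom_mae", float(np.mean(np.abs(y_true - y_pred))), False
#
#     LGBMRegressor().fit(X, y, eval_set=[(X_val, y_val)], eval_metric=mae_metric)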


# documentation templates for LGBMModel methods are shared between the classes in
# this module and those in the ``dask`` module

_lgbmmodel_doc_fit = """
    Build a gradient boosting model from the training set (X, y).

    Parameters
    ----------
    X : {X_shape}
        Input feature matrix.
    y : {y_shape}
        The target values (class labels in classification, real numbers in regression).
    sample_weight : {sample_weight_shape}
        Weights of training data. Weights should be non-negative.
    init_score : {init_score_shape}
        Init score of training data.
    group : {group_shape}
        Group/query data.
        Only used in the learning-to-rank task.
        sum(group) = n_samples.
        For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
        where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
    eval_set : list or None, optional (default=None)
        A list of (X, y) tuple pairs to use as validation sets.
    eval_names : list of str, or None, optional (default=None)
        Names of eval_set.
    eval_sample_weight : {eval_sample_weight_shape}
        Weights of eval data. Weights should be non-negative.
    eval_class_weight : list or None, optional (default=None)
        Class weights of eval data.
    eval_init_score : {eval_init_score_shape}
        Init score of eval data.
    eval_group : {eval_group_shape}
        Group data of eval data.
    eval_metric : str, callable, list or None, optional (default=None)
        If str, it should be a built-in evaluation metric to use.
        If callable, it should be a custom evaluation metric, see note below for more details.
        If list, it can be a list of built-in metrics, a list of custom evaluation metrics, or a mix of both.
        In either case, the ``metric`` from the model parameters will be evaluated and used as well.
        Default: 'l2' for LGBMRegressor, 'logloss' for LGBMClassifier, 'ndcg' for LGBMRanker.
    feature_name : list of str, or 'auto', optional (default='auto')
        Feature names.
        If 'auto' and data is pandas DataFrame, data column names are used.
    categorical_feature : list of str or int, or 'auto', optional (default='auto')
        Categorical features.
        If list of int, interpreted as indices.
        If list of str, interpreted as feature names (need to specify ``feature_name`` as well).
        If 'auto' and data is pandas DataFrame, pandas unordered categorical columns are used.
        All values in categorical features will be cast to int32 and thus should be less than int32 max value (2147483647).
        Large values could be memory consuming. Consider using consecutive integers starting from zero.
        All negative values in categorical features will be treated as missing values.
        The output cannot be monotonically constrained with respect to a categorical feature.
        Floating point numbers in categorical features will be rounded towards 0.
    callbacks : list of callable, or None, optional (default=None)
        List of callback functions that are applied at each iteration.
        See Callbacks in Python API for more information.
    init_model : str, pathlib.Path, Booster, LGBMModel or None, optional (default=None)
        Filename of LightGBM model, Booster instance or LGBMModel instance used to continue training.

    Returns
    -------
    self : LGBMModel
        Returns self.
    """

_lgbmmodel_doc_custom_eval_note = """
    Note
    ----
    Custom eval function expects a callable with following signatures:
    ``func(y_true, y_pred)``, ``func(y_true, y_pred, weight)`` or
    ``func(y_true, y_pred, weight, group)``
    and returns (eval_name, eval_result, is_higher_better) or
    list of (eval_name, eval_result, is_higher_better):

        y_true : numpy 1-D array of shape = [n_samples]
            The target values.
        y_pred : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
            The predicted values.
            In case of custom ``objective``, predicted values are returned before any transformation,
            e.g. they are raw margin instead of probability of positive class for binary task in this case.
        weight : numpy 1-D array of shape = [n_samples]
            The weight of samples. Weights should be non-negative.
        group : numpy 1-D array
            Group/query data.
            Only used in the learning-to-rank task.
            sum(group) = n_samples.
            For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
            where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
        eval_name : str
            The name of evaluation function (without whitespace).
        eval_result : float
            The eval result.
        is_higher_better : bool
            Is eval result higher better, e.g. AUC is ``is_higher_better``.
"""

_lgbmmodel_doc_predict = """
    {description}

    Parameters
    ----------
    X : {X_shape}
        Input features matrix.
    raw_score : bool, optional (default=False)
        Whether to predict raw scores.
    start_iteration : int, optional (default=0)
        Start index of the iteration to predict.
        If <= 0, starts from the first iteration.
    num_iteration : int or None, optional (default=None)
        Total number of iterations used in the prediction.
        If None, if the best iteration exists and start_iteration <= 0, the best iteration is used;
        otherwise, all iterations from ``start_iteration`` are used (no limits).
        If <= 0, all iterations from ``start_iteration`` are used (no limits).
    pred_leaf : bool, optional (default=False)
        Whether to predict leaf index.
    pred_contrib : bool, optional (default=False)
        Whether to predict feature contributions.

        .. note::

            If you want to get more explanations for your model's predictions using SHAP values,
            like SHAP interaction values,
            you can install the shap package (https://github.com/slundberg/shap).
            Note that unlike the shap package, with ``pred_contrib`` we return a matrix with an extra
            column, where the last column is the expected value.

    validate_features : bool, optional (default=False)
        If True, ensure that the features used to predict match the ones used to train.
        Used only if data is pandas DataFrame.
    **kwargs
        Other parameters for the prediction.

    Returns
    -------
    {output_name} : {predicted_result_shape}
        The predicted values.
    X_leaves : {X_leaves_shape}
        If ``pred_leaf=True``, the predicted leaf of every tree for each sample.
    X_SHAP_values : {X_SHAP_values_shape}
        If ``pred_contrib=True``, the feature contributions for each sample.
    """


def _extract_evaluation_meta_data(
    *,
    collection: Optional[Union[Dict[Any, Any], List[Any]]],
    name: str,
    i: int,
) -> Optional[Any]:
    """Try to extract the ith element of one of the ``eval_*`` inputs."""
    if collection is None:
        return None
    elif isinstance(collection, list):
        # It's possible, for example, to pass 3 eval sets through `eval_set`,
        # but only 1 init_score through `eval_init_score`.
        #
        # This if-else accounts for that possibility.
        if len(collection) > i:
            return collection[i]
        else:
            return None
    elif isinstance(collection, dict):
        return collection.get(i, None)
    else:
        raise TypeError(f"{name} should be dict or list")
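
# For example (a sketch of the lookup rules above, with ``w0``/``w1``/``w2``
# as placeholder weight arrays):
#
#     _extract_evaluation_meta_data(collection=[w0, w1], name="eval_sample_weight", i=1)  # -> w1
#     _extract_evaluation_meta_data(collection=[w0, w1], name="eval_sample_weight", i=2)  # -> None (list shorter than i + 1)
#     _extract_evaluation_meta_data(collection={2: w2}, name="eval_sample_weight", i=2)   # -> w2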


class LGBMModel(_LGBMModelBase):
    """Implementation of the scikit-learn API for LightGBM."""

    def __init__(
        self,
        boosting_type: str = "gbdt",
        num_leaves: int = 31,
        max_depth: int = -1,
        learning_rate: float = 0.1,
        n_estimators: int = 100,
        subsample_for_bin: int = 200000,
        objective: Optional[Union[str, _LGBM_ScikitCustomObjectiveFunction]] = None,
        class_weight: Optional[Union[Dict, str]] = None,
        min_split_gain: float = 0.0,
        min_child_weight: float = 1e-3,
        min_child_samples: int = 20,
        subsample: float = 1.0,
        subsample_freq: int = 0,
        colsample_bytree: float = 1.0,
        reg_alpha: float = 0.0,
        reg_lambda: float = 0.0,
        random_state: Optional[Union[int, np.random.RandomState, np.random.Generator]] = None,
        n_jobs: Optional[int] = None,
        importance_type: str = "split",
        **kwargs: Any,
    ):
        r"""Construct a gradient boosting model.

        Parameters
        ----------
        boosting_type : str, optional (default='gbdt')
            'gbdt', traditional Gradient Boosting Decision Tree.
            'dart', Dropouts meet Multiple Additive Regression Trees.
            'rf', Random Forest.
        num_leaves : int, optional (default=31)
            Maximum tree leaves for base learners.
        max_depth : int, optional (default=-1)
            Maximum tree depth for base learners, <=0 means no limit.
            If setting this to a positive value, consider also changing ``num_leaves`` to ``<= 2^max_depth``.
        learning_rate : float, optional (default=0.1)
            Boosting learning rate.
            You can use ``callbacks`` parameter of ``fit`` method to shrink/adapt learning rate
            in training using ``reset_parameter`` callback.
            Note, that this will ignore the ``learning_rate`` argument in training.
        n_estimators : int, optional (default=100)
            Number of boosted trees to fit.
        subsample_for_bin : int, optional (default=200000)
            Number of samples for constructing bins.
        objective : str, callable or None, optional (default=None)
            Specify the learning task and the corresponding learning objective or
            a custom objective function to be used (see note below).
            Default: 'regression' for LGBMRegressor, 'binary' or 'multiclass' for LGBMClassifier, 'lambdarank' for LGBMRanker.
        class_weight : dict, 'balanced' or None, optional (default=None)
            Weights associated with classes in the form ``{class_label: weight}``.
            Use this parameter only for multi-class classification task;
            for binary classification task you may use ``is_unbalance`` or ``scale_pos_weight`` parameters.
            Note, that the usage of all these parameters will result in poor estimates of the individual class probabilities.
            You may want to consider performing probability calibration
            (https://scikit-learn.org/stable/modules/calibration.html) of your model.
            The 'balanced' mode uses the values of y to automatically adjust weights
            inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))``.
            If None, all classes are supposed to have weight one.
            Note, that these weights will be multiplied with ``sample_weight`` (passed through the ``fit`` method)
            if ``sample_weight`` is specified.
        min_split_gain : float, optional (default=0.)
            Minimum loss reduction required to make a further partition on a leaf node of the tree.
        min_child_weight : float, optional (default=1e-3)
            Minimum sum of instance weight (Hessian) needed in a child (leaf).
        min_child_samples : int, optional (default=20)
            Minimum number of data needed in a child (leaf).
        subsample : float, optional (default=1.)
            Subsample ratio of the training instance.
        subsample_freq : int, optional (default=0)
            Frequency of subsample, <=0 means disabled.
        colsample_bytree : float, optional (default=1.)
            Subsample ratio of columns when constructing each tree.
        reg_alpha : float, optional (default=0.)
            L1 regularization term on weights.
        reg_lambda : float, optional (default=0.)
            L2 regularization term on weights.
        random_state : int, RandomState object or None, optional (default=None)
            Random number seed.
            If int, this number is used to seed the C++ code.
            If RandomState or Generator object (numpy), a random integer is picked based on its state to seed the C++ code.
            If None, default seeds in C++ code are used.
        n_jobs : int or None, optional (default=None)
            Number of parallel threads to use for training (can be changed at prediction time by
            passing it as an extra keyword argument).

            For better performance, it is recommended to set this to the number of physical cores
            in the CPU.

            Negative integers are interpreted as following joblib's formula (n_cpus + 1 + n_jobs), just like
            scikit-learn (so e.g. -1 means using all threads). A value of zero corresponds to the default number of
            threads configured for OpenMP in the system. A value of ``None`` (the default) corresponds
            to using the number of physical cores in the system (its correct detection requires
            either the ``joblib`` or the ``psutil`` util libraries to be installed).

            .. versionchanged:: 4.0.0

        importance_type : str, optional (default='split')
            The type of feature importance to be filled into ``feature_importances_``.
            If 'split', result contains numbers of times the feature is used in a model.
            If 'gain', result contains total gains of splits which use the feature.
        **kwargs
            Other parameters for the model.
            Check http://lightgbm.readthedocs.io/en/latest/Parameters.html for more parameters.

            .. warning::

                \*\*kwargs is not supported in sklearn, it may cause unexpected issues.

        Note
        ----
        A custom objective function can be provided for the ``objective`` parameter.
        In this case, it should have the signature
        ``objective(y_true, y_pred) -> grad, hess``,
        ``objective(y_true, y_pred, weight) -> grad, hess``
        or ``objective(y_true, y_pred, weight, group) -> grad, hess``:

            y_true : numpy 1-D array of shape = [n_samples]
                The target values.
            y_pred : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                The predicted values.
                Predicted values are returned before any transformation,
                e.g. they are raw margin instead of probability of positive class for binary task.
            weight : numpy 1-D array of shape = [n_samples]
                The weight of samples. Weights should be non-negative.
            group : numpy 1-D array
                Group/query data.
                Only used in the learning-to-rank task.
                sum(group) = n_samples.
                For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
                where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
            grad : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                The value of the first order derivative (gradient) of the loss
                with respect to the elements of y_pred for each sample point.
            hess : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
                The value of the second order derivative (Hessian) of the loss
                with respect to the elements of y_pred for each sample point.

        For multi-class task, y_pred is a numpy 2-D array of shape = [n_samples, n_classes],
        and grad and hess should be returned in the same format.
        """
        if not SKLEARN_INSTALLED:
            raise LightGBMError(
                "scikit-learn is required for lightgbm.sklearn. "
                "You must install scikit-learn and restart your session to use this module."
            )

        self.boosting_type = boosting_type
        self.objective = objective
        self.num_leaves = num_leaves
        self.max_depth = max_depth
        self.learning_rate = learning_rate
        self.n_estimators = n_estimators
        self.subsample_for_bin = subsample_for_bin
        self.min_split_gain = min_split_gain
        self.min_child_weight = min_child_weight
        self.min_child_samples = min_child_samples
        self.subsample = subsample
        self.subsample_freq = subsample_freq
        self.colsample_bytree = colsample_bytree
        self.reg_alpha = reg_alpha
        self.reg_lambda = reg_lambda
        self.random_state = random_state
        self.n_jobs = n_jobs
        self.importance_type = importance_type
        self._Booster: Optional[Booster] = None
        self._evals_result: _EvalResultDict = {}
        self._best_score: _LGBM_BoosterBestScoreType = {}
        self._best_iteration: int = -1
        self._other_params: Dict[str, Any] = {}
        self._objective = objective
        self.class_weight = class_weight
        self._class_weight: Optional[Union[Dict, str]] = None
        self._class_map: Optional[Dict[int, int]] = None
        self._n_features: int = -1
        self._n_features_in: int = -1
        self._classes: Optional[np.ndarray] = None
        self._n_classes: int = -1
        self.set_params(**kwargs)

    # scikit-learn 1.6 introduced a __sklearn_tags__() method intended to replace _more_tags().
    # _more_tags() can be removed whenever lightgbm's minimum supported scikit-learn version
    # is >=1.6.
    # ref: https://github.com/microsoft/LightGBM/pull/6651
    def _more_tags(self) -> Dict[str, Any]:
        check_sample_weight_str = (
            "In LightGBM, setting a sample's weight to 0 can produce a different result than omitting the sample. "
            "Such samples intentionally still affect count-based measures like 'min_data_in_leaf' "
            "(https://github.com/microsoft/LightGBM/issues/5626#issuecomment-1712706678) and the estimated distribution "
            "of features for Dataset construction (see https://github.com/microsoft/LightGBM/issues/5553)."
        )
        # "check_sample_weight_equivalence" can be removed when lightgbm's
        # minimum supported scikit-learn version is at least 1.6
        # ref: https://github.com/scikit-learn/scikit-learn/pull/30137
        return {
            "allow_nan": True,
            "X_types": ["2darray", "sparse", "1dlabels"],
            "_xfail_checks": {
                "check_no_attributes_set_in_init": "scikit-learn incorrectly asserts that private attributes "
                "cannot be set in __init__: "
                "(see https://github.com/microsoft/LightGBM/issues/2628)",
                "check_sample_weight_equivalence": check_sample_weight_str,
                "check_sample_weight_equivalence_on_dense_data": check_sample_weight_str,
                "check_sample_weight_equivalence_on_sparse_data": check_sample_weight_str,
            },
        }
    @staticmethod
    def _update_sklearn_tags_from_dict(
        *,
        tags: "_sklearn_Tags",
        tags_dict: Dict[str, Any],
    ) -> "_sklearn_Tags":
        """Update ``sklearn.utils.Tags`` inherited from ``scikit-learn`` base classes.

        ``scikit-learn`` 1.6 introduced a dataclass-based interface for estimator tags.
        ref: https://github.com/scikit-learn/scikit-learn/pull/29677

        This method handles updating that instance based on the value in ``self._more_tags()``.
        """
        tags.input_tags.allow_nan = tags_dict["allow_nan"]
        tags.input_tags.sparse = "sparse" in tags_dict["X_types"]
        tags.target_tags.one_d_labels = "1dlabels" in tags_dict["X_types"]
        return tags

    def __sklearn_tags__(self) -> Optional["_sklearn_Tags"]:
        # _LGBMModelBase.__sklearn_tags__() cannot be called unconditionally,
        # because that method isn't defined for scikit-learn<1.6
        if not hasattr(_LGBMModelBase, "__sklearn_tags__"):
            err_msg = (
                "__sklearn_tags__() should not be called when using scikit-learn<1.6. "
                f"Detected version: {_sklearn_version}"
            )
            raise AttributeError(err_msg)

        # take whatever tags are provided by BaseEstimator, then modify
        # them with LightGBM-specific values
        return self._update_sklearn_tags_from_dict(
            tags=_LGBMModelBase.__sklearn_tags__(self),
            tags_dict=self._more_tags(),
        )

    def __sklearn_is_fitted__(self) -> bool:
        return getattr(self, "fitted_", False)

    def get_params(self, deep: bool = True) -> Dict[str, Any]:
        """Get parameters for this estimator.

        Parameters
        ----------
        deep : bool, optional (default=True)
            If True, will return the parameters for this estimator and
            contained subobjects that are estimators.

        Returns
        -------
        params : dict
            Parameter names mapped to their values.
        """
        params = super().get_params(deep=deep)
        params.update(self._other_params)
        return params

    def set_params(self, **params: Any) -> "LGBMModel":
        """Set the parameters of this estimator.

        Parameters
        ----------
        **params
            Parameter names with their new values.

        Returns
        -------
        self : object
            Returns self.
        """
        for key, value in params.items():
            setattr(self, key, value)
            if hasattr(self, f"_{key}"):
                setattr(self, f"_{key}", value)
            self._other_params[key] = value
        return self
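
    # A small sketch of how ``set_params`` interacts with ``get_params``:
    # parameters unknown to ``__init__`` are remembered in ``self._other_params``
    # and therefore round-trip through ``get_params`` (``min_data_in_bin`` is just
    # an example of a LightGBM parameter without a dedicated constructor argument).
    #
    #     est = LGBMRegressor().set_params(min_data_in_bin=5)
    #     est.get_params()["min_data_in_bin"]  # -> 5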
    def _process_params(self, stage: str) -> Dict[str, Any]:
        """Process the parameters of this estimator based on its type, parameter aliases, etc.

        Parameters
        ----------
        stage : str
            Name of the stage (can be ``fit`` or ``predict``) this method is called from.

        Returns
        -------
        processed_params : dict
            Processed parameter names mapped to their values.
        """
        assert stage in {"fit", "predict"}
        params = self.get_params()

        params.pop("objective", None)
        for alias in _ConfigAliases.get("objective"):
            if alias in params:
                obj = params.pop(alias)
                _log_warning(f"Found '{alias}' in params. Will use it instead of 'objective' argument")
                if stage == "fit":
                    self._objective = obj
        if stage == "fit":
            if self._objective is None:
                if isinstance(self, LGBMRegressor):
                    self._objective = "regression"
                elif isinstance(self, LGBMClassifier):
                    if self._n_classes > 2:
                        self._objective = "multiclass"
                    else:
                        self._objective = "binary"
                elif isinstance(self, LGBMRanker):
                    self._objective = "lambdarank"
                else:
                    raise ValueError("Unknown LGBMModel type.")
        if callable(self._objective):
            if stage == "fit":
                params["objective"] = _ObjectiveFunctionWrapper(self._objective)
            else:
                params["objective"] = "None"
        else:
            params["objective"] = self._objective

        params.pop("importance_type", None)
        params.pop("n_estimators", None)
        params.pop("class_weight", None)

        if isinstance(params["random_state"], np.random.RandomState):
            params["random_state"] = params["random_state"].randint(np.iinfo(np.int32).max)
        elif isinstance(params["random_state"], np.random.Generator):
            params["random_state"] = int(params["random_state"].integers(np.iinfo(np.int32).max))
        if self._n_classes > 2:
            for alias in _ConfigAliases.get("num_class"):
                params.pop(alias, None)
            params["num_class"] = self._n_classes
        if hasattr(self, "_eval_at"):
            eval_at = self._eval_at
            for alias in _ConfigAliases.get("eval_at"):
                if alias in params:
                    _log_warning(f"Found '{alias}' in params. Will use it instead of 'eval_at' argument")
                    eval_at = params.pop(alias)
            params["eval_at"] = eval_at

        # register default metric for consistency with callable eval_metric case
        original_metric = self._objective if isinstance(self._objective, str) else None
        if original_metric is None:
            # try to deduce from class instance
            if isinstance(self, LGBMRegressor):
                original_metric = "l2"
            elif isinstance(self, LGBMClassifier):
                original_metric = "multi_logloss" if self._n_classes > 2 else "binary_logloss"
            elif isinstance(self, LGBMRanker):
                original_metric = "ndcg"

        # overwrite default metric by explicitly set metric
        params = _choose_param_value("metric", params, original_metric)

        # use joblib conventions for negative n_jobs, just like scikit-learn
        # at predict time, this is handled later due to the order of parameter updates
        if stage == "fit":
            params = _choose_param_value("num_threads", params, self.n_jobs)
            params["num_threads"] = self._process_n_jobs(params["num_threads"])

        return params
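
    # For example, the objective alias handling above means that, assuming
    # 'application' is registered as one of the 'objective' aliases:
    #
    #     LGBMRegressor(application="mape").fit(X, y)  # X, y are placeholders
    #
    # logs "Found 'application' in params. Will use it instead of 'objective' argument"
    # and trains with objective='mape'.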

    def _process_n_jobs(self, n_jobs: Optional[int]) -> int:
        """Convert special values of n_jobs to their actual values according to the formulas that apply.

        Parameters
        ----------
        n_jobs : int or None
            The original value of n_jobs, potentially having special values such as 'None' or
            negative integers.

        Returns
        -------
        n_jobs : int
            The value of n_jobs with special values converted to actual number of threads.
        """
        if n_jobs is None:
            n_jobs = _LGBMCpuCount(only_physical_cores=True)
        elif n_jobs < 0:
            n_jobs = max(_LGBMCpuCount(only_physical_cores=False) + 1 + n_jobs, 1)
        return n_jobs
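
    # A small illustration of the conversion above, assuming a machine with
    # 8 physical and 16 logical cores (actual values depend on the host):
    #
    #     self._process_n_jobs(None)  # -> 8   (physical cores)
    #     self._process_n_jobs(-1)    # -> 16  (16 + 1 + (-1), joblib convention)
    #     self._process_n_jobs(-17)   # -> 1   (clamped to at least one thread)
    #     self._process_n_jobs(4)     # -> 4   (explicit counts pass through)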

    def fit(
        self,
        X: _LGBM_ScikitMatrixLike,
        y: _LGBM_LabelType,
        sample_weight: Optional[_LGBM_WeightType] = None,
        init_score: Optional[_LGBM_InitScoreType] = None,
        group: Optional[_LGBM_GroupType] = None,
        eval_set: Optional[List[_LGBM_ScikitValidSet]] = None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight: Optional[List[_LGBM_WeightType]] = None,
        eval_class_weight: Optional[List[float]] = None,
        eval_init_score: Optional[List[_LGBM_InitScoreType]] = None,
        eval_group: Optional[List[_LGBM_GroupType]] = None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        feature_name: _LGBM_FeatureNameConfiguration = "auto",
        categorical_feature: _LGBM_CategoricalFeatureConfiguration = "auto",
        callbacks: Optional[List[Callable]] = None,
        init_model: Optional[Union[str, Path, Booster, "LGBMModel"]] = None,
    ) -> "LGBMModel":
        """Docstring is set after definition, using a template."""
        params = self._process_params(stage="fit")

        # Do not modify original args in fit function
        # Refer to https://github.com/microsoft/LightGBM/pull/2619
        eval_metric_list: List[Union[str, _LGBM_ScikitCustomEvalFunction]]
        if eval_metric is None:
            eval_metric_list = []
        elif isinstance(eval_metric, list):
            eval_metric_list = copy.deepcopy(eval_metric)
        else:
            eval_metric_list = [copy.deepcopy(eval_metric)]

        # Separate built-in from callable evaluation metrics
        eval_metrics_callable = [_EvalFunctionWrapper(f) for f in eval_metric_list if callable(f)]
        eval_metrics_builtin = [m for m in eval_metric_list if isinstance(m, str)]

        # concatenate metric from params (or default if not provided in params) and eval_metric
        params["metric"] = [params["metric"]] if isinstance(params["metric"], (str, type(None))) else params["metric"]
        params["metric"] = [e for e in eval_metrics_builtin if e not in params["metric"]] + params["metric"]
        params["metric"] = [metric for metric in params["metric"] if metric is not None]

        if not isinstance(X, (pd_DataFrame, dt_DataTable)):
            _X, _y = _LGBMValidateData(
                self,
                X,
                y,
                reset=True,
                # allow any input type (this validation is done further down, in lgb.Dataset())
                accept_sparse=True,
                # do not raise an error if Inf or NaN values are found (LightGBM handles these internally)
                ensure_all_finite=False,
                # raise an error on 0-row and 1-row inputs
                ensure_min_samples=2,
            )
            if sample_weight is not None:
                sample_weight = _LGBMCheckSampleWeight(sample_weight, _X)
        else:
            _X, _y = X, y

            # for other data types, setting n_features_in_ is handled by _LGBMValidateData() in the branch above
            self.n_features_in_ = _X.shape[1]

        if self._class_weight is None:
            self._class_weight = self.class_weight
        if self._class_weight is not None:
            class_sample_weight = _LGBMComputeSampleWeight(self._class_weight, y)
            if sample_weight is None or len(sample_weight) == 0:
                sample_weight = class_sample_weight
            else:
                sample_weight = np.multiply(sample_weight, class_sample_weight)

        train_set = Dataset(
            data=_X,
            label=_y,
            weight=sample_weight,
            group=group,
            init_score=init_score,
            categorical_feature=categorical_feature,
            feature_name=feature_name,
            params=params,
        )

        valid_sets: List[Dataset] = []
        if eval_set is not None:
            if isinstance(eval_set, tuple):
                eval_set = [eval_set]
            for i, valid_data in enumerate(eval_set):
                # reduce cost for prediction training data
                if valid_data[0] is X and valid_data[1] is y:
                    valid_set = train_set
                else:
                    valid_weight = _extract_evaluation_meta_data(
                        collection=eval_sample_weight,
                        name="eval_sample_weight",
                        i=i,
                    )
                    valid_class_weight = _extract_evaluation_meta_data(
                        collection=eval_class_weight,
                        name="eval_class_weight",
                        i=i,
                    )
                    if valid_class_weight is not None:
                        if isinstance(valid_class_weight, dict) and self._class_map is not None:
                            valid_class_weight = {self._class_map[k]: v for k, v in valid_class_weight.items()}
                        valid_class_sample_weight = _LGBMComputeSampleWeight(valid_class_weight, valid_data[1])
                        if valid_weight is None or len(valid_weight) == 0:
                            valid_weight = valid_class_sample_weight
                        else:
                            valid_weight = np.multiply(valid_weight, valid_class_sample_weight)
                    valid_init_score = _extract_evaluation_meta_data(
                        collection=eval_init_score,
                        name="eval_init_score",
                        i=i,
                    )
                    valid_group = _extract_evaluation_meta_data(
                        collection=eval_group,
                        name="eval_group",
                        i=i,
                    )
                    valid_set = Dataset(
                        data=valid_data[0],
                        label=valid_data[1],
                        weight=valid_weight,
                        group=valid_group,
                        init_score=valid_init_score,
                        categorical_feature="auto",
                        params=params,
                    )

                valid_sets.append(valid_set)

        if isinstance(init_model, LGBMModel):
            init_model = init_model.booster_

        if callbacks is None:
            callbacks = []
        else:
            callbacks = copy.copy(callbacks)  # don't use deepcopy here to allow non-serializable objects

        evals_result: _EvalResultDict = {}
        callbacks.append(record_evaluation(evals_result))

        self._Booster = train(
            params=params,
            train_set=train_set,
            num_boost_round=self.n_estimators,
            valid_sets=valid_sets,
            valid_names=eval_names,
            feval=eval_metrics_callable,  # type: ignore[arg-type]
            init_model=init_model,
            callbacks=callbacks,
        )

        # This populates the property self.n_features_, the number of features in the fitted model,
        # and so should only be set after fitting.
        #
        # The related property self._n_features_in, which populates self.n_features_in_,
        # is set BEFORE fitting.
        self._n_features = self._Booster.num_feature()

        self._evals_result = evals_result
        self._best_iteration = self._Booster.best_iteration
        self._best_score = self._Booster.best_score

        self.fitted_ = True

        # free dataset
        self._Booster.free_dataset()
        del train_set, valid_sets
        return self

    fit.__doc__ = (
        _lgbmmodel_doc_fit.format(
            X_shape="numpy array, pandas DataFrame, H2O DataTable's Frame (deprecated), scipy.sparse, list of lists of int or float of shape = [n_samples, n_features]",
            y_shape="numpy array, pandas DataFrame, pandas Series, list of int or float of shape = [n_samples]",
            sample_weight_shape="numpy array, pandas Series, list of int or float of shape = [n_samples] or None, optional (default=None)",
            init_score_shape="numpy array, pandas DataFrame, pandas Series, list of int or float of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task) or shape = [n_samples, n_classes] (for multi-class task) or None, optional (default=None)",
            group_shape="numpy array, pandas Series, list of int or float, or None, optional (default=None)",
            eval_sample_weight_shape="list of array (same types as ``sample_weight`` supports), or None, optional (default=None)",
            eval_init_score_shape="list of array (same types as ``init_score`` supports), or None, optional (default=None)",
            eval_group_shape="list of array (same types as ``group`` supports), or None, optional (default=None)",
        )
        + "\n\n"
        + _lgbmmodel_doc_custom_eval_note
    )
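
    # A hedged usage sketch for ``fit`` with a validation set and early stopping;
    # ``X_train``/``y_train``/``X_valid``/``y_valid`` are placeholders and
    # ``lgb`` is assumed to be ``import lightgbm as lgb``:
    #
    #     model = LGBMClassifier(n_estimators=100)
    #     model.fit(
    #         X_train,
    #         y_train,
    #         eval_set=[(X_valid, y_valid)],
    #         eval_metric="auc",
    #         callbacks=[lgb.early_stopping(stopping_rounds=5)],
    #     )
    #     model.best_iteration_  # set by the early_stopping callback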

    def predict(
        self,
        X: _LGBM_ScikitMatrixLike,
        raw_score: bool = False,
        start_iteration: int = 0,
        num_iteration: Optional[int] = None,
        pred_leaf: bool = False,
        pred_contrib: bool = False,
        validate_features: bool = False,
        **kwargs: Any,
    ):
        """Docstring is set after definition, using a template."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("Estimator not fitted, call fit before exploiting the model.")
        if not isinstance(X, (pd_DataFrame, dt_DataTable)):
            X = _LGBMValidateData(
                self,
                X,
                # 'y' being omitted = run scikit-learn's check_array() instead of check_X_y()
                #
                # Prevent scikit-learn from deleting or modifying attributes like 'feature_names_in_' and 'n_features_in_'.
                # These shouldn't be changed at predict() time.
                reset=False,
                # allow any input type (this validation is done further down, in lgb.Dataset())
                accept_sparse=True,
                # do not raise an error if Inf or NaN values are found (LightGBM handles these internally)
                ensure_all_finite=False,
                # raise an error on 0-row inputs
                ensure_min_samples=1,
            )
        # retrieve original params that possibly can be used in both training and prediction
        # and then overwrite them (considering aliases) with params that were passed directly in prediction
        predict_params = self._process_params(stage="predict")
        for alias in _ConfigAliases.get_by_alias(
            "data",
            "X",
            "raw_score",
            "start_iteration",
            "num_iteration",
            "pred_leaf",
            "pred_contrib",
            *kwargs.keys(),
        ):
            predict_params.pop(alias, None)
        predict_params.update(kwargs)

        # number of threads can have values with special meaning which is only applied
        # in the scikit-learn interface, these should not reach the c++ side as-is
        predict_params = _choose_param_value("num_threads", predict_params, self.n_jobs)
        predict_params["num_threads"] = self._process_n_jobs(predict_params["num_threads"])

        return self._Booster.predict(  # type: ignore[union-attr]
            X,
            raw_score=raw_score,
            start_iteration=start_iteration,
            num_iteration=num_iteration,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            validate_features=validate_features,
            **predict_params,
        )

    predict.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted value for each sample.",
        X_shape="numpy array, pandas DataFrame, H2O DataTable's Frame (deprecated), scipy.sparse, list of lists of int or float of shape = [n_samples, n_features]",
        output_name="predicted_result",
        predicted_result_shape="array-like of shape = [n_samples] or shape = [n_samples, n_classes]",
        X_leaves_shape="array-like of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]",
        X_SHAP_values_shape="array-like of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes] or list with n_classes length of such objects",
    )
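
    # A minimal usage sketch of ``predict()`` and its output shapes (illustrative
    # only; assumes scikit-learn's ``make_regression`` for demo data):
    #
    #     from sklearn.datasets import make_regression
    #     from lightgbm import LGBMRegressor
    #
    #     X, y = make_regression(n_samples=100, n_features=5, random_state=0)
    #     reg = LGBMRegressor(n_estimators=10).fit(X, y)
    #     reg.predict(X).shape                     # (100,)
    #     reg.predict(X, pred_leaf=True).shape     # (100, 10): one leaf index per tree
    #     reg.predict(X, pred_contrib=True).shape  # (100, 6): n_features + 1 SHAP columns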

    @property
    def n_features_(self) -> int:
        """:obj:`int`: The number of features of the fitted model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No n_features found. Need to call fit beforehand.")
        return self._n_features

    @property
    def n_features_in_(self) -> int:
        """:obj:`int`: The number of features of the fitted model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No n_features_in found. Need to call fit beforehand.")
        return self._n_features_in

    @n_features_in_.setter
    def n_features_in_(self, value: int) -> None:
        """Set number of features found in passed-in dataset.

        Starting with ``scikit-learn`` 1.6, ``scikit-learn`` expects to be able to directly
        set this property in functions like ``validate_data()``.

        .. note::

            Do not call ``estimator.n_features_in_ = some_int`` or anything else that invokes
            this method. It is only here for compatibility with ``scikit-learn`` validation
            functions used internally in ``lightgbm``.
        """
        self._n_features_in = value

    @property
    def best_score_(self) -> _LGBM_BoosterBestScoreType:
        """:obj:`dict`: The best score of the fitted model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No best_score found. Need to call fit beforehand.")
        return self._best_score

    @property
    def best_iteration_(self) -> int:
        """:obj:`int`: The best iteration of the fitted model, if the ``early_stopping()`` callback has been specified."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError(
                "No best_iteration found. Need to call fit with early_stopping callback beforehand."
            )
        return self._best_iteration

    @property
    def objective_(self) -> Union[str, _LGBM_ScikitCustomObjectiveFunction]:
        """:obj:`str` or :obj:`callable`: The concrete objective used while fitting this model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No objective found. Need to call fit beforehand.")
        return self._objective  # type: ignore[return-value]

    @property
    def n_estimators_(self) -> int:
        """:obj:`int`: True number of boosting iterations performed.

        This might be less than parameter ``n_estimators`` if early stopping was enabled or
        if boosting stopped early due to limits on complexity like ``min_gain_to_split``.

        .. versionadded:: 4.0.0
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No n_estimators found. Need to call fit beforehand.")
        return self._Booster.current_iteration()  # type: ignore
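
    # Illustrative sketch of how this can differ from the ``n_estimators`` parameter
    # when early stopping is used (``X_train`` etc. are hypothetical, and the
    # resulting count depends on the data):
    #
    #     import lightgbm as lgb
    #
    #     reg = lgb.LGBMRegressor(n_estimators=1000)
    #     reg.fit(X_train, y_train, eval_set=[(X_valid, y_valid)],
    #             callbacks=[lgb.early_stopping(stopping_rounds=10)])
    #     reg.n_estimators_  # e.g. 137, i.e. fewer than the requested 1000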

    @property
    def n_iter_(self) -> int:
        """:obj:`int`: True number of boosting iterations performed.

        This might be less than parameter ``n_estimators`` if early stopping was enabled or
        if boosting stopped early due to limits on complexity like ``min_gain_to_split``.

        .. versionadded:: 4.0.0
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No n_iter found. Need to call fit beforehand.")
        return self._Booster.current_iteration()  # type: ignore

    @property
    def booster_(self) -> Booster:
        """Booster: The underlying Booster of this model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No booster found. Need to call fit beforehand.")
        return self._Booster  # type: ignore[return-value]

    @property
    def evals_result_(self) -> _EvalResultDict:
        """:obj:`dict`: The evaluation results if validation sets have been specified."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No results found. Need to call fit with eval_set beforehand.")
        return self._evals_result

    @property
    def feature_importances_(self) -> np.ndarray:
        """:obj:`array` of shape = [n_features]: The feature importances (the higher, the more important).

        .. note::

            The ``importance_type`` attribute is passed to ``Booster.feature_importance()``
            to configure the type of importance values to be extracted.
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No feature_importances found. Need to call fit beforehand.")
        return self._Booster.feature_importance(importance_type=self.importance_type)  # type: ignore[union-attr]

    @property
    def feature_name_(self) -> List[str]:
        """:obj:`list` of shape = [n_features]: The names of features.

        .. note::

            If input does not contain feature names, they will be added during fitting in the format ``Column_0``, ``Column_1``, ..., ``Column_N``.
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No feature_name found. Need to call fit beforehand.")
        return self._Booster.feature_name()  # type: ignore[union-attr]

    @property
    def feature_names_in_(self) -> np.ndarray:
        """:obj:`array` of shape = [n_features]: scikit-learn compatible version of ``.feature_name_``.

        .. versionadded:: 4.5.0
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No feature_names_in_ found. Need to call fit beforehand.")
        return np.array(self.feature_name_)

    @feature_names_in_.deleter
    def feature_names_in_(self) -> None:
        """Intercept calls to delete ``feature_names_in_``.

        Some code paths in ``scikit-learn`` try to delete the ``feature_names_in_`` attribute
        on estimators when a new training dataset that doesn't have feature names is passed.
        LightGBM automatically assigns feature names to such datasets
        (like ``Column_0``, ``Column_1``, etc.), so that deletion behavior is not wanted here.

        However, that behavior is coupled to ``scikit-learn`` automatically updating
        ``n_features_in_`` in those same code paths, which is necessary for compliance
        with its API (via argument ``reset`` to functions like ``validate_data()`` and
        ``check_array()``).

        .. note::

            Do not call ``del estimator.feature_names_in_`` or anything else that invokes
            this method. It is only here for compatibility with ``scikit-learn`` validation
            functions used internally in ``lightgbm``.
        """
        pass


class LGBMRegressor(_LGBMRegressorBase, LGBMModel):
    """LightGBM regressor."""

    def _more_tags(self) -> Dict[str, Any]:
        # handle the case where RegressorMixin possibly provides _more_tags()
        if callable(getattr(_LGBMRegressorBase, "_more_tags", None)):
            tags = _LGBMRegressorBase._more_tags(self)
        else:
            tags = {}
        # override those with LightGBM-specific preferences
        tags.update(LGBMModel._more_tags(self))
        return tags

    def __sklearn_tags__(self) -> "_sklearn_Tags":
        tags = LGBMModel.__sklearn_tags__(self)
        tags.estimator_type = "regressor"
        tags.regressor_tags = _sklearn_RegressorTags(multi_label=False)
        return tags

    def fit(  # type: ignore[override]
        self,
        X: _LGBM_ScikitMatrixLike,
        y: _LGBM_LabelType,
        sample_weight: Optional[_LGBM_WeightType] = None,
        init_score: Optional[_LGBM_InitScoreType] = None,
        eval_set: Optional[List[_LGBM_ScikitValidSet]] = None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight: Optional[List[_LGBM_WeightType]] = None,
        eval_init_score: Optional[List[_LGBM_InitScoreType]] = None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        feature_name: _LGBM_FeatureNameConfiguration = "auto",
        categorical_feature: _LGBM_CategoricalFeatureConfiguration = "auto",
        callbacks: Optional[List[Callable]] = None,
        init_model: Optional[Union[str, Path, Booster, LGBMModel]] = None,
    ) -> "LGBMRegressor":
        """Docstring is inherited from the LGBMModel."""
        super().fit(
            X,
            y,
            sample_weight=sample_weight,
            init_score=init_score,
            eval_set=eval_set,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_init_score=eval_init_score,
            eval_metric=eval_metric,
            feature_name=feature_name,
            categorical_feature=categorical_feature,
            callbacks=callbacks,
            init_model=init_model,
        )
        return self

    _base_doc = LGBMModel.fit.__doc__.replace("self : LGBMModel", "self : LGBMRegressor")  # type: ignore
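    # the inherited docstring documents some parameters that do not apply to regression;
    # the slicing below removes the ``group``, ``eval_class_weight``, and ``eval_group`` sections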
    _base_doc = (
        _base_doc[: _base_doc.find("group :")]  # type: ignore
        + _base_doc[_base_doc.find("eval_set :") :]
    )  # type: ignore
    _base_doc = _base_doc[: _base_doc.find("eval_class_weight :")] + _base_doc[_base_doc.find("eval_init_score :") :]
    fit.__doc__ = _base_doc[: _base_doc.find("eval_group :")] + _base_doc[_base_doc.find("eval_metric :") :]


class LGBMClassifier(_LGBMClassifierBase, LGBMModel):
    """LightGBM classifier."""

    def _more_tags(self) -> Dict[str, Any]:
        # handle the case where ClassifierMixin possibly provides _more_tags()
        if callable(getattr(_LGBMClassifierBase, "_more_tags", None)):
            tags = _LGBMClassifierBase._more_tags(self)
        else:
            tags = {}
        # override those with LightGBM-specific preferences
        tags.update(LGBMModel._more_tags(self))
        return tags

    def __sklearn_tags__(self) -> "_sklearn_Tags":
        tags = LGBMModel.__sklearn_tags__(self)
        tags.estimator_type = "classifier"
        tags.classifier_tags = _sklearn_ClassifierTags(multi_class=True, multi_label=False)
        return tags

    def fit(  # type: ignore[override]
        self,
        X: _LGBM_ScikitMatrixLike,
        y: _LGBM_LabelType,
        sample_weight: Optional[_LGBM_WeightType] = None,
        init_score: Optional[_LGBM_InitScoreType] = None,
        eval_set: Optional[List[_LGBM_ScikitValidSet]] = None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight: Optional[List[_LGBM_WeightType]] = None,
        eval_class_weight: Optional[List[float]] = None,
        eval_init_score: Optional[List[_LGBM_InitScoreType]] = None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        feature_name: _LGBM_FeatureNameConfiguration = "auto",
        categorical_feature: _LGBM_CategoricalFeatureConfiguration = "auto",
        callbacks: Optional[List[Callable]] = None,
        init_model: Optional[Union[str, Path, Booster, LGBMModel]] = None,
    ) -> "LGBMClassifier":
        """Docstring is inherited from the LGBMModel."""
        _LGBMAssertAllFinite(y)
        _LGBMCheckClassificationTargets(y)
        self._le = _LGBMLabelEncoder().fit(y)
        _y = self._le.transform(y)
        self._class_map = dict(zip(self._le.classes_, self._le.transform(self._le.classes_)))
        if isinstance(self.class_weight, dict):
            self._class_weight = {self._class_map[k]: v for k, v in self.class_weight.items()}

        self._classes = self._le.classes_
        self._n_classes = len(self._classes)  # type: ignore[arg-type]
        if self.objective is None:
            self._objective = None

        # adjust eval metrics to match whether binary or multiclass
        # classification is being performed
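        # (e.g. eval_metric="logloss" is mapped to "multi_logloss" when there are
        # more than two classes, and to "binary_logloss" otherwise)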
        if not callable(eval_metric):
            if isinstance(eval_metric, list):
                eval_metric_list = eval_metric
            elif isinstance(eval_metric, str):
                eval_metric_list = [eval_metric]
            else:
                eval_metric_list = []
            if self.__is_multiclass:
                for index, metric in enumerate(eval_metric_list):
                    if metric in {"logloss", "binary_logloss"}:
                        eval_metric_list[index] = "multi_logloss"
                    elif metric in {"error", "binary_error"}:
                        eval_metric_list[index] = "multi_error"
            else:
                for index, metric in enumerate(eval_metric_list):
                    if metric in {"logloss", "multi_logloss"}:
                        eval_metric_list[index] = "binary_logloss"
                    elif metric in {"error", "multi_error"}:
                        eval_metric_list[index] = "binary_error"
            eval_metric = eval_metric_list

        # do not modify args, as it causes errors in model selection tools
        valid_sets: Optional[List[_LGBM_ScikitValidSet]] = None
        if eval_set is not None:
            if isinstance(eval_set, tuple):
                eval_set = [eval_set]
            valid_sets = []
            for valid_x, valid_y in eval_set:
                if valid_x is X and valid_y is y:
                    valid_sets.append((valid_x, _y))
                else:
                    valid_sets.append((valid_x, self._le.transform(valid_y)))

        super().fit(
            X,
            _y,
            sample_weight=sample_weight,
            init_score=init_score,
            eval_set=valid_sets,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_class_weight=eval_class_weight,
            eval_init_score=eval_init_score,
            eval_metric=eval_metric,
            feature_name=feature_name,
            categorical_feature=categorical_feature,
            callbacks=callbacks,
            init_model=init_model,
        )
        return self

    _base_doc = LGBMModel.fit.__doc__.replace("self : LGBMModel", "self : LGBMClassifier")  # type: ignore
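    # the slicing below removes the ranking-only ``group`` and ``eval_group`` sections
    # from the inherited docstring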
    _base_doc = (
        _base_doc[: _base_doc.find("group :")]  # type: ignore
        + _base_doc[_base_doc.find("eval_set :") :]
    )  # type: ignore
    fit.__doc__ = _base_doc[: _base_doc.find("eval_group :")] + _base_doc[_base_doc.find("eval_metric :") :]
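
    # A minimal usage sketch (illustrative only; assumes scikit-learn's
    # ``make_classification`` for demo data):
    #
    #     from sklearn.datasets import make_classification
    #     from lightgbm import LGBMClassifier
    #
    #     X, y = make_classification(n_samples=100, n_classes=3, n_informative=4, random_state=0)
    #     clf = LGBMClassifier(n_estimators=10).fit(X, y)
    #     clf.predict(X)              # original class labels, shape (100,)
    #     clf.predict_proba(X).shape  # (100, 3): one probability column per class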

    def predict(
        self,
        X: _LGBM_ScikitMatrixLike,
        raw_score: bool = False,
        start_iteration: int = 0,
        num_iteration: Optional[int] = None,
        pred_leaf: bool = False,
        pred_contrib: bool = False,
        validate_features: bool = False,
        **kwargs: Any,
    ):
        """Docstring is inherited from the LGBMModel."""
        result = self.predict_proba(
            X=X,
            raw_score=raw_score,
            start_iteration=start_iteration,
            num_iteration=num_iteration,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            validate_features=validate_features,
            **kwargs,
        )
        if callable(self._objective) or raw_score or pred_leaf or pred_contrib:
            return result
        else:
            class_index = np.argmax(result, axis=1)
            return self._le.inverse_transform(class_index)

    predict.__doc__ = LGBMModel.predict.__doc__

    def predict_proba(
        self,
        X: _LGBM_ScikitMatrixLike,
        raw_score: bool = False,
        start_iteration: int = 0,
        num_iteration: Optional[int] = None,
        pred_leaf: bool = False,
        pred_contrib: bool = False,
        validate_features: bool = False,
        **kwargs: Any,
    ):
        """Docstring is set after definition, using a template."""
        result = super().predict(
            X=X,
            raw_score=raw_score,
            start_iteration=start_iteration,
            num_iteration=num_iteration,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            validate_features=validate_features,
            **kwargs,
        )
        if callable(self._objective) and not (raw_score or pred_leaf or pred_contrib):
            _log_warning(
                "Cannot compute class probabilities or labels "
                "due to the usage of customized objective function.\n"
                "Returning raw scores instead."
            )
            return result
        elif self.__is_multiclass or raw_score or pred_leaf or pred_contrib:  # type: ignore [operator]
            return result
        else:
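            # binary case: the underlying Booster returns only P(class 1), so
            # stack ``1 - p`` and ``p`` into shape [n_samples, 2]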
            return np.vstack((1.0 - result, result)).transpose()

    predict_proba.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted probability for each class for each sample.",
        X_shape="numpy array, pandas DataFrame, H2O DataTable's Frame (deprecated), scipy.sparse, list of lists of int or float of shape = [n_samples, n_features]",
        output_name="predicted_probability",
        predicted_result_shape="array-like of shape = [n_samples] or shape = [n_samples, n_classes]",
        X_leaves_shape="array-like of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]",
        X_SHAP_values_shape="array-like of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes] or list with n_classes length of such objects",
    )

    @property
    def classes_(self) -> np.ndarray:
        """:obj:`array` of shape = [n_classes]: The class label array."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No classes found. Need to call fit beforehand.")
        return self._classes  # type: ignore[return-value]

    @property
    def n_classes_(self) -> int:
        """:obj:`int`: The number of classes."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("No classes found. Need to call fit beforehand.")
        return self._n_classes

    @property
    def __is_multiclass(self) -> bool:
        """:obj:`bool`:  Indicator of whether the classifier is used for multiclass."""
        return self._n_classes > 2 or (isinstance(self._objective, str) and self._objective in _MULTICLASS_OBJECTIVES)


class LGBMRanker(LGBMModel):
    """LightGBM ranker.

    .. warning::

        scikit-learn doesn't support ranking applications yet,
        therefore this class is not fully compatible with the sklearn ecosystem.
        Please use this class mainly for training and applying ranking models in the common scikit-learn-like way.
    """

    def fit(  # type: ignore[override]
        self,
        X: _LGBM_ScikitMatrixLike,
        y: _LGBM_LabelType,
        sample_weight: Optional[_LGBM_WeightType] = None,
        init_score: Optional[_LGBM_InitScoreType] = None,
        group: Optional[_LGBM_GroupType] = None,
        eval_set: Optional[List[_LGBM_ScikitValidSet]] = None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight: Optional[List[_LGBM_WeightType]] = None,
        eval_init_score: Optional[List[_LGBM_InitScoreType]] = None,
        eval_group: Optional[List[_LGBM_GroupType]] = None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        eval_at: Union[List[int], Tuple[int, ...]] = (1, 2, 3, 4, 5),
        feature_name: _LGBM_FeatureNameConfiguration = "auto",
        categorical_feature: _LGBM_CategoricalFeatureConfiguration = "auto",
        callbacks: Optional[List[Callable]] = None,
        init_model: Optional[Union[str, Path, Booster, LGBMModel]] = None,
    ) -> "LGBMRanker":
        """Docstring is inherited from the LGBMModel."""
        # check group data
        if group is None:
            raise ValueError("Should set group for ranking task")

        if eval_set is not None:
            if eval_group is None:
                raise ValueError("Eval_group cannot be None when eval_set is not None")
            elif len(eval_group) != len(eval_set):
                raise ValueError("Length of eval_group should be equal to eval_set")
            elif (
                isinstance(eval_group, dict)
                and any(i not in eval_group or eval_group[i] is None for i in range(len(eval_group)))
                or isinstance(eval_group, list)
                and any(group is None for group in eval_group)
            ):
                raise ValueError(
                    "Should set group for all eval datasets for ranking task; "
                    "if you use dict, the index should start from 0"
                )

        self._eval_at = eval_at
        super().fit(
            X,
            y,
            sample_weight=sample_weight,
            init_score=init_score,
            group=group,
            eval_set=eval_set,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_init_score=eval_init_score,
            eval_group=eval_group,
            eval_metric=eval_metric,
            feature_name=feature_name,
            categorical_feature=categorical_feature,
            callbacks=callbacks,
            init_model=init_model,
        )
        return self

    _base_doc = LGBMModel.fit.__doc__.replace("self : LGBMModel", "self : LGBMRanker")  # type: ignore
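    # the slicing below removes the classification-only ``eval_class_weight`` section from
    # the inherited docstring and documents the ranking-specific ``eval_at`` parameter instead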
    fit.__doc__ = (
        _base_doc[: _base_doc.find("eval_class_weight :")]  # type: ignore
        + _base_doc[_base_doc.find("eval_init_score :") :]
    )  # type: ignore
    _base_doc = fit.__doc__
    _before_feature_name, _feature_name, _after_feature_name = _base_doc.partition("feature_name :")
    fit.__doc__ = f"""{_before_feature_name}eval_at : list or tuple of int, optional (default=(1, 2, 3, 4, 5))
        The evaluation positions of the specified metric.
    {_feature_name}{_after_feature_name}"""
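
    # A minimal usage sketch (illustrative only; ``group=[10, 10]`` means the first
    # 10 rows of ``X`` form one query and the next 10 rows another):
    #
    #     import numpy as np
    #     from lightgbm import LGBMRanker
    #
    #     rng = np.random.default_rng(0)
    #     X = rng.normal(size=(20, 5))
    #     y = rng.integers(0, 4, size=20)  # graded relevance labels
    #     ranker = LGBMRanker(n_estimators=10, min_child_samples=1).fit(X, y, group=[10, 10])
    #     ranker.predict(X)  # higher score = predicted more relevant within its query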