"src/git@developer.sourcefind.cn:tianlh/lightgbm-dcu.git" did not exist on "d951be99ec724ae7ec27d680463cdbf7a442e963"
sklearn.py 53.6 KB
Newer Older
wxchan's avatar
wxchan committed
1
# coding: utf-8
"""Scikit-learn wrapper interface for LightGBM."""
import copy
from inspect import signature
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import numpy as np

from .basic import Booster, Dataset, LightGBMError, _ArrayLike, _choose_param_value, _ConfigAliases, _log_warning
from .callback import log_evaluation, record_evaluation
from .compat import (SKLEARN_INSTALLED, LGBMNotFittedError, _LGBMAssertAllFinite, _LGBMCheckArray,
                     _LGBMCheckClassificationTargets, _LGBMCheckSampleWeight, _LGBMCheckXY, _LGBMClassifierBase,
                     _LGBMComputeSampleWeight, _LGBMLabelEncoder, _LGBMModelBase, _LGBMRegressorBase, dt_DataTable,
                     pd_DataFrame)
from .engine import train

_EvalResultType = Tuple[str, float, bool]

_LGBM_ScikitCustomObjectiveFunction = Union[
    Callable[
        [np.ndarray, np.ndarray],
        Tuple[_ArrayLike, _ArrayLike]
    ],
    Callable[
        [np.ndarray, np.ndarray, np.ndarray],
        Tuple[_ArrayLike, _ArrayLike]
    ],
]
_LGBM_ScikitCustomEvalFunction = Union[
    Callable[
        [np.ndarray, np.ndarray],
        Union[_EvalResultType, List[_EvalResultType]]
    ],
    Callable[
        [np.ndarray, np.ndarray, np.ndarray],
        Union[_EvalResultType, List[_EvalResultType]]
    ],
    Callable[
        [np.ndarray, np.ndarray, np.ndarray, np.ndarray],
        Union[_EvalResultType, List[_EvalResultType]]
    ],
]


class _ObjectiveFunctionWrapper:
    """Proxy class for objective function."""

    def __init__(self, func: _LGBM_ScikitCustomObjectiveFunction):
        """Construct a proxy class.

        This class transforms an objective function into the ``new_func(preds, dataset)``
        signature expected by ``lightgbm.engine.train``.

        Parameters
        ----------
        func : callable
            Expects a callable with signature ``func(y_true, y_pred)`` or ``func(y_true, y_pred, group)``
            that returns (grad, hess):

                y_true : numpy 1-D array of shape = [n_samples]
                    The target values.
                y_pred : numpy 1-D array of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
                    The predicted values.
                    Predicted values are returned before any transformation,
                    e.g. they are raw margin instead of probability of positive class for binary task.
                group : numpy 1-D array
                    Group/query data.
                    Only used in the learning-to-rank task.
                    sum(group) = n_samples.
                    For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
                    where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
                grad : list, numpy 1-D array or pandas Series of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
                    The value of the first order derivative (gradient) of the loss
                    with respect to the elements of y_pred for each sample point.
                hess : list, numpy 1-D array or pandas Series of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
                    The value of the second order derivative (Hessian) of the loss
                    with respect to the elements of y_pred for each sample point.

        .. note::

            For multi-class task, y_pred is grouped by class_id first, then by row_id.
            If you want to get the i-th row of y_pred for the j-th class, access y_pred[j * num_data + i],
            and you should group grad and hess in the same way.
        """
        self.func = func

    def __call__(self, preds, dataset):
        """Call passed function with appropriate arguments.

        Parameters
        ----------
        preds : numpy 1-D array of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
            The predicted values.
        dataset : Dataset
            The training dataset.

        Returns
        -------
        grad : list, numpy 1-D array or pandas Series of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
            The value of the first order derivative (gradient) of the loss
            with respect to the elements of preds for each sample point.
        hess : list, numpy 1-D array or pandas Series of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
            The value of the second order derivative (Hessian) of the loss
            with respect to the elements of preds for each sample point.
        """
        labels = dataset.get_label()
        argc = len(signature(self.func).parameters)
        if argc == 2:
            grad, hess = self.func(labels, preds)
        elif argc == 3:
            grad, hess = self.func(labels, preds, dataset.get_group())
        else:
            raise TypeError(f"Self-defined objective function should have 2 or 3 arguments, got {argc}")
        # apply sample weights to the objective
        weight = dataset.get_weight()
        if weight is not None:
            # single-output case: one gradient entry per sample
            if len(weight) == len(grad):
                grad = np.multiply(grad, weight)
                hess = np.multiply(hess, weight)
            else:
                num_data = len(weight)
                num_class = len(grad) // num_data
                if num_class * num_data != len(grad):
                    raise ValueError("Length of grad and hess should be equal to num_class * num_data")
                for k in range(num_class):
                    for i in range(num_data):
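                        # multi-class layout: the gradient of sample i for class k
                        # sits at index k * num_data + i, weighted by sample weight i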
                        idx = k * num_data + i
                        grad[idx] *= weight[i]
                        hess[idx] *= weight[i]
        return grad, hess
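

# --- Example (illustrative; not part of the public API) ---------------------
# A minimal custom objective matching the ``func(y_true, y_pred)`` signature
# accepted by _ObjectiveFunctionWrapper: squared error. Its gradient is
# (y_pred - y_true) and its Hessian is constant 1 for every sample.
def _example_l2_objective(y_true, y_pred):
    grad = y_pred - y_true         # first derivative of 0.5 * (y_pred - y_true) ** 2
    hess = np.ones_like(y_pred)    # second derivative is constant
    return grad, hess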


class _EvalFunctionWrapper:
    """Proxy class for evaluation function."""

    def __init__(self, func: _LGBM_ScikitCustomEvalFunction):
        """Construct a proxy class.

        This class transforms an evaluation function into the ``new_func(preds, dataset)``
        signature expected by ``lightgbm.engine.train``.

        Parameters
        ----------
        func : callable
            Expects a callable with following signatures:
            ``func(y_true, y_pred)``,
            ``func(y_true, y_pred, weight)``
            or ``func(y_true, y_pred, weight, group)``
            that returns (eval_name, eval_result, is_higher_better) or
            a list of (eval_name, eval_result, is_higher_better):

                y_true : numpy 1-D array of shape = [n_samples]
                    The target values.
                y_pred : numpy 1-D array of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
                    The predicted values.
                    In case of custom ``objective``, predicted values are returned before any transformation,
                    e.g. they are raw margin instead of probability of positive class for binary task in this case.
                weight : numpy 1-D array of shape = [n_samples]
                    The weight of samples.
                group : numpy 1-D array
                    Group/query data.
                    Only used in the learning-to-rank task.
                    sum(group) = n_samples.
                    For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
                    where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
                eval_name : str
                    The name of evaluation function (without whitespace).
                eval_result : float
                    The eval result.
                is_higher_better : bool
                    Is eval result higher better, e.g. AUC is ``is_higher_better``.

        .. note::

            For multi-class task, y_pred is grouped by class_id first, then by row_id.
            If you want to get the i-th row of y_pred for the j-th class, access y_pred[j * num_data + i].
        """
        self.func = func

    def __call__(self, preds, dataset):
        """Call passed function with appropriate arguments.
183

184
185
        Parameters
        ----------
186
        preds : numpy 1-D array of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
187
188
189
190
191
192
            The predicted values.
        dataset : Dataset
            The training dataset.

        Returns
        -------
193
        eval_name : str
Andrew Ziem's avatar
Andrew Ziem committed
194
            The name of evaluation function (without whitespace).
195
196
197
198
199
        eval_result : float
            The eval result.
        is_higher_better : bool
            Is eval result higher better, e.g. AUC is ``is_higher_better``.
        """
        labels = dataset.get_label()
        argc = len(signature(self.func).parameters)
        if argc == 2:
            return self.func(labels, preds)
        elif argc == 3:
            return self.func(labels, preds, dataset.get_weight())
        elif argc == 4:
            return self.func(labels, preds, dataset.get_weight(), dataset.get_group())
        else:
            raise TypeError(f"Self-defined eval function should have 2, 3 or 4 arguments, got {argc}")


# documentation templates for LGBMModel methods are shared between the classes in
# this module and those in the ``dask`` module

_lgbmmodel_doc_fit = (
    """
    Build a gradient boosting model from the training set (X, y).

    Parameters
    ----------
    X : {X_shape}
        Input feature matrix.
    y : {y_shape}
        The target values (class labels in classification, real numbers in regression).
    sample_weight : {sample_weight_shape}
        Weights of training data.
    init_score : {init_score_shape}
        Init score of training data.
    group : {group_shape}
        Group/query data.
        Only used in the learning-to-rank task.
        sum(group) = n_samples.
        For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
        where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
    eval_set : list or None, optional (default=None)
        A list of (X, y) tuple pairs to use as validation sets.
    eval_names : list of str, or None, optional (default=None)
        Names of eval_set.
    eval_sample_weight : {eval_sample_weight_shape}
        Weights of eval data.
    eval_class_weight : list or None, optional (default=None)
        Class weights of eval data.
    eval_init_score : {eval_init_score_shape}
        Init score of eval data.
    eval_group : {eval_group_shape}
        Group data of eval data.
    eval_metric : str, callable, list or None, optional (default=None)
        If str, it should be a built-in evaluation metric to use.
        If callable, it should be a custom evaluation metric, see note below for more details.
        If list, it can be a list of built-in metrics, a list of custom evaluation metrics, or a mix of both.
        In either case, the ``metric`` from the model parameters will be evaluated and used as well.
        Default: 'l2' for LGBMRegressor, 'logloss' for LGBMClassifier, 'ndcg' for LGBMRanker.
    early_stopping_rounds : int or None, optional (default=None)
        Activates early stopping. The model will train until the validation score stops improving.
        Validation score needs to improve at least every ``early_stopping_rounds`` round(s)
        to continue training.
        Requires at least one validation set and one metric.
        If there's more than one, will check all of them, but the training data is ignored anyway.
        To check only the first metric, set the ``first_metric_only`` parameter to ``True``
        in additional parameters ``**kwargs`` of the model constructor.
    feature_name : list of str, or 'auto', optional (default='auto')
        Feature names.
        If 'auto' and data is pandas DataFrame, data column names are used.
    categorical_feature : list of str or int, or 'auto', optional (default='auto')
        Categorical features.
        If list of int, interpreted as indices.
        If list of str, interpreted as feature names (need to specify ``feature_name`` as well).
        If 'auto' and data is pandas DataFrame, pandas unordered categorical columns are used.
        All values in categorical features should be less than int32 max value (2147483647).
        Large values could be memory consuming. Consider using consecutive integers starting from zero.
        All negative values in categorical features will be treated as missing values.
        The output cannot be monotonically constrained with respect to a categorical feature.
    callbacks : list of callable, or None, optional (default=None)
        List of callback functions that are applied at each iteration.
        See Callbacks in Python API for more information.
    init_model : str, pathlib.Path, Booster, LGBMModel or None, optional (default=None)
        Filename of LightGBM model, Booster instance or LGBMModel instance used to continue training.

    Returns
    -------
    self : LGBMModel
        Returns self.
    """
)

_lgbmmodel_doc_custom_eval_note = """
    Note
    ----
    Custom eval function expects a callable with following signatures:
    ``func(y_true, y_pred)``, ``func(y_true, y_pred, weight)`` or
    ``func(y_true, y_pred, weight, group)``
    and returns (eval_name, eval_result, is_higher_better) or
    list of (eval_name, eval_result, is_higher_better):

        y_true : numpy 1-D array of shape = [n_samples]
            The target values.
        y_pred : numpy 1-D array of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
            The predicted values.
            In case of custom ``objective``, predicted values are returned before any transformation,
            e.g. they are raw margin instead of probability of positive class for binary task in this case.
        weight : numpy 1-D array of shape = [n_samples]
            The weight of samples.
        group : numpy 1-D array
            Group/query data.
            Only used in the learning-to-rank task.
            sum(group) = n_samples.
            For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
            where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
        eval_name : str
            The name of evaluation function (without whitespace).
        eval_result : float
            The eval result.
        is_higher_better : bool
            Is eval result higher better, e.g. AUC is ``is_higher_better``.

    For multi-class task, y_pred is grouped by class_id first, then by row_id.
    If you want to get the i-th row of y_pred for the j-th class, access y_pred[j * num_data + i].
"""

_lgbmmodel_doc_predict = (
    """
    {description}

    Parameters
    ----------
    X : {X_shape}
        Input feature matrix.
    raw_score : bool, optional (default=False)
        Whether to predict raw scores.
    start_iteration : int, optional (default=0)
        Start index of the iteration to predict.
        If <= 0, starts from the first iteration.
    num_iteration : int or None, optional (default=None)
        Total number of iterations used in the prediction.
        If None, if the best iteration exists and start_iteration <= 0, the best iteration is used;
        otherwise, all iterations from ``start_iteration`` are used (no limits).
        If <= 0, all iterations from ``start_iteration`` are used (no limits).
    pred_leaf : bool, optional (default=False)
        Whether to predict leaf index.
    pred_contrib : bool, optional (default=False)
        Whether to predict feature contributions.

        .. note::

            If you want to get more explanations for your model's predictions using SHAP values,
            like SHAP interaction values,
            you can install the shap package (https://github.com/slundberg/shap).
            Note that unlike the shap package, with ``pred_contrib`` we return a matrix with an extra
            column, where the last column is the expected value.

    **kwargs
        Other parameters for the prediction.

    Returns
    -------
    {output_name} : {predicted_result_shape}
        The predicted values.
    X_leaves : {X_leaves_shape}
        If ``pred_leaf=True``, the predicted leaf of every tree for each sample.
    X_SHAP_values : {X_SHAP_values_shape}
        If ``pred_contrib=True``, the feature contributions for each sample.
    """
)
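

# --- Usage sketch (illustrative; not part of the public API) ----------------
# A minimal end-to-end run of the fit/predict interface documented by the
# templates above, on synthetic data. Assumes scikit-learn is installed;
# ``_example_sklearn_usage`` is a hypothetical helper, not an exported name.
def _example_sklearn_usage():
    rng = np.random.default_rng(0)
    X = rng.normal(size=(100, 4))
    y = X[:, 0] + rng.normal(scale=0.1, size=100)
    reg = LGBMRegressor(n_estimators=20)  # defined later in this module
    reg.fit(X, y, eval_set=[(X, y)], eval_metric='l2')
    return reg.predict(X)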


class LGBMModel(_LGBMModelBase):
    """Implementation of the scikit-learn API for LightGBM."""

    def __init__(
        self,
        boosting_type: str = 'gbdt',
        num_leaves: int = 31,
        max_depth: int = -1,
        learning_rate: float = 0.1,
        n_estimators: int = 100,
        subsample_for_bin: int = 200000,
        objective: Optional[Union[str, _LGBM_ScikitCustomObjectiveFunction]] = None,
        class_weight: Optional[Union[Dict, str]] = None,
        min_split_gain: float = 0.,
        min_child_weight: float = 1e-3,
        min_child_samples: int = 20,
        subsample: float = 1.,
        subsample_freq: int = 0,
        colsample_bytree: float = 1.,
        reg_alpha: float = 0.,
        reg_lambda: float = 0.,
        random_state: Optional[Union[int, np.random.RandomState]] = None,
        n_jobs: int = -1,
        importance_type: str = 'split',
        **kwargs
    ):
        r"""Construct a gradient boosting model.

        Parameters
        ----------
        boosting_type : str, optional (default='gbdt')
            'gbdt', traditional Gradient Boosting Decision Tree.
            'dart', Dropouts meet Multiple Additive Regression Trees.
            'goss', Gradient-based One-Side Sampling.
            'rf', Random Forest.
        num_leaves : int, optional (default=31)
            Maximum tree leaves for base learners.
        max_depth : int, optional (default=-1)
            Maximum tree depth for base learners, <=0 means no limit.
        learning_rate : float, optional (default=0.1)
            Boosting learning rate.
            You can use the ``callbacks`` parameter of the ``fit`` method to shrink/adapt the learning rate
            during training via the ``reset_parameter`` callback.
            Note, that this will ignore the ``learning_rate`` argument in training.
        n_estimators : int, optional (default=100)
            Number of boosted trees to fit.
        subsample_for_bin : int, optional (default=200000)
            Number of samples for constructing bins.
        objective : str, callable or None, optional (default=None)
            Specify the learning task and the corresponding learning objective or
            a custom objective function to be used (see note below).
            Default: 'regression' for LGBMRegressor, 'binary' or 'multiclass' for LGBMClassifier, 'lambdarank' for LGBMRanker.
        class_weight : dict, 'balanced' or None, optional (default=None)
            Weights associated with classes in the form ``{class_label: weight}``.
            Use this parameter only for multi-class classification task;
            for binary classification task you may use ``is_unbalance`` or ``scale_pos_weight`` parameters.
            Note, that the usage of all these parameters will result in poor estimates of the individual class probabilities.
            You may want to consider performing probability calibration
            (https://scikit-learn.org/stable/modules/calibration.html) of your model.
            The 'balanced' mode uses the values of y to automatically adjust weights
            inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))``.
            If None, all classes are supposed to have weight one.
            Note, that these weights will be multiplied with ``sample_weight`` (passed through the ``fit`` method)
            if ``sample_weight`` is specified.
        min_split_gain : float, optional (default=0.)
            Minimum loss reduction required to make a further partition on a leaf node of the tree.
        min_child_weight : float, optional (default=1e-3)
            Minimum sum of instance weight (Hessian) needed in a child (leaf).
        min_child_samples : int, optional (default=20)
            Minimum number of data points needed in a child (leaf).
        subsample : float, optional (default=1.)
            Subsample ratio of the training instances.
        subsample_freq : int, optional (default=0)
            Frequency of subsampling; <=0 means no subsampling.
        colsample_bytree : float, optional (default=1.)
            Subsample ratio of columns when constructing each tree.
        reg_alpha : float, optional (default=0.)
            L1 regularization term on weights.
        reg_lambda : float, optional (default=0.)
            L2 regularization term on weights.
        random_state : int, RandomState object or None, optional (default=None)
            Random number seed.
            If int, this number is used to seed the C++ code.
            If RandomState object (numpy), a random integer is picked based on its state to seed the C++ code.
            If None, default seeds in C++ code are used.
        n_jobs : int, optional (default=-1)
            Number of parallel threads to use for training (can be changed at prediction time).
        importance_type : str, optional (default='split')
            The type of feature importance to be filled into ``feature_importances_``.
            If 'split', result contains numbers of times the feature is used in a model.
            If 'gain', result contains total gains of splits which use the feature.
        **kwargs
            Other parameters for the model.
            Check http://lightgbm.readthedocs.io/en/latest/Parameters.html for more parameters.

            .. warning::

                \*\*kwargs is not supported in sklearn, it may cause unexpected issues.

        Note
        ----
        A custom objective function can be provided for the ``objective`` parameter.
        In this case, it should have the signature
        ``objective(y_true, y_pred) -> grad, hess`` or
        ``objective(y_true, y_pred, group) -> grad, hess``:

            y_true : numpy 1-D array of shape = [n_samples]
                The target values.
            y_pred : numpy 1-D array of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
                The predicted values.
                Predicted values are returned before any transformation,
                e.g. they are raw margin instead of probability of positive class for binary task.
            group : numpy 1-D array
                Group/query data.
                Only used in the learning-to-rank task.
                sum(group) = n_samples.
                For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
                where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
            grad : list, numpy 1-D array or pandas Series of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
                The value of the first order derivative (gradient) of the loss
                with respect to the elements of y_pred for each sample point.
            hess : list, numpy 1-D array or pandas Series of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
                The value of the second order derivative (Hessian) of the loss
                with respect to the elements of y_pred for each sample point.

        For multi-class task, y_pred is grouped by class_id first, then by row_id.
        If you want to get the i-th row of y_pred for the j-th class, access y_pred[j * num_data + i],
        and you should group grad and hess in the same way.
        """
        if not SKLEARN_INSTALLED:
            raise LightGBMError('scikit-learn is required for lightgbm.sklearn. '
                                'You must install scikit-learn and restart your session to use this module.')

        self.boosting_type = boosting_type
        self.objective = objective
        self.num_leaves = num_leaves
        self.max_depth = max_depth
        self.learning_rate = learning_rate
        self.n_estimators = n_estimators
        self.subsample_for_bin = subsample_for_bin
        self.min_split_gain = min_split_gain
        self.min_child_weight = min_child_weight
        self.min_child_samples = min_child_samples
        self.subsample = subsample
        self.subsample_freq = subsample_freq
        self.colsample_bytree = colsample_bytree
        self.reg_alpha = reg_alpha
        self.reg_lambda = reg_lambda
        self.random_state = random_state
        self.n_jobs = n_jobs
        self.importance_type = importance_type
        self._Booster: Optional[Booster] = None
        self._evals_result = None
        self._best_score = None
        self._best_iteration = None
        self._other_params: Dict[str, Any] = {}
        self._objective = objective
        self.class_weight = class_weight
        self._class_weight = None
        self._class_map = None
        self._n_features = None
        self._n_features_in = None
        self._classes = None
        self._n_classes = None
        self.set_params(**kwargs)

    def _more_tags(self):
        return {
            'allow_nan': True,
            'X_types': ['2darray', 'sparse', '1dlabels'],
            '_xfail_checks': {
                'check_no_attributes_set_in_init':
                'scikit-learn incorrectly asserts that private attributes '
                'cannot be set in __init__: '
                '(see https://github.com/microsoft/LightGBM/issues/2628)'
            }
        }

    def __sklearn_is_fitted__(self) -> bool:
        return getattr(self, "fitted_", False)

    def get_params(self, deep=True):
        """Get parameters for this estimator.

        Parameters
        ----------
        deep : bool, optional (default=True)
            If True, will return the parameters for this estimator and
            contained subobjects that are estimators.

        Returns
        -------
        params : dict
            Parameter names mapped to their values.
        """
        params = super().get_params(deep=deep)
        params.update(self._other_params)
        return params

    def set_params(self, **params):
        """Set the parameters of this estimator.

        Parameters
        ----------
        **params
            Parameter names with their new values.

        Returns
        -------
        self : object
            Returns self.
        """
        for key, value in params.items():
            setattr(self, key, value)
            if hasattr(self, f"_{key}"):
                setattr(self, f"_{key}", value)
            self._other_params[key] = value
        return self

    def _process_params(self, stage: str) -> Dict[str, Any]:
        """Process the parameters of this estimator based on its type, parameter aliases, etc.

        Parameters
        ----------
        stage : str
            Name of the stage (can be ``fit`` or ``predict``) this method is called from.

        Returns
        -------
        processed_params : dict
            Processed parameter names mapped to their values.
        """
        assert stage in {"fit", "predict"}
        params = self.get_params()

        params.pop('objective', None)
        for alias in _ConfigAliases.get('objective'):
            if alias in params:
                obj = params.pop(alias)
                _log_warning(f"Found '{alias}' in params. Will use it instead of 'objective' argument")
                if stage == "fit":
                    self._objective = obj
        if stage == "fit":
            if self._objective is None:
                if isinstance(self, LGBMRegressor):
                    self._objective = "regression"
                elif isinstance(self, LGBMClassifier):
                    if self._n_classes > 2:
                        self._objective = "multiclass"
                    else:
                        self._objective = "binary"
                elif isinstance(self, LGBMRanker):
                    self._objective = "lambdarank"
                else:
                    raise ValueError("Unknown LGBMModel type.")
        if callable(self._objective):
            if stage == "fit":
                self._fobj = _ObjectiveFunctionWrapper(self._objective)
            params['objective'] = 'None'  # objective = nullptr for unknown objective
        else:
            if stage == "fit":
                self._fobj = None
            params['objective'] = self._objective

        params.pop('importance_type', None)
        params.pop('n_estimators', None)
        params.pop('class_weight', None)

        if isinstance(params['random_state'], np.random.RandomState):
            params['random_state'] = params['random_state'].randint(np.iinfo(np.int32).max)
        if self._n_classes is not None and self._n_classes > 2:
            for alias in _ConfigAliases.get('num_class'):
                params.pop(alias, None)
            params['num_class'] = self._n_classes
        if hasattr(self, '_eval_at'):
            eval_at = self._eval_at
            for alias in _ConfigAliases.get('eval_at'):
                if alias in params:
                    _log_warning(f"Found '{alias}' in params. Will use it instead of 'eval_at' argument")
                    eval_at = params.pop(alias)
            params['eval_at'] = eval_at

        # register default metric for consistency with callable eval_metric case
        original_metric = self._objective if isinstance(self._objective, str) else None
        if original_metric is None:
            # try to deduce from class instance
            if isinstance(self, LGBMRegressor):
                original_metric = "l2"
            elif isinstance(self, LGBMClassifier):
                original_metric = "multi_logloss" if self._n_classes > 2 else "binary_logloss"
            elif isinstance(self, LGBMRanker):
                original_metric = "ndcg"

        # overwrite the default metric with any explicitly set metric
        params = _choose_param_value("metric", params, original_metric)

        return params

    def fit(self, X, y,
            sample_weight=None, init_score=None, group=None,
            eval_set=None, eval_names=None, eval_sample_weight=None,
            eval_class_weight=None, eval_init_score=None, eval_group=None,
            eval_metric=None, early_stopping_rounds=None,
            feature_name='auto', categorical_feature='auto',
            callbacks=None, init_model=None):
        """Docstring is set after definition, using a template."""
        params = self._process_params(stage="fit")

        # Do not modify original args in fit function
        # Refer to https://github.com/microsoft/LightGBM/pull/2619
        eval_metric_list = copy.deepcopy(eval_metric)
        if not isinstance(eval_metric_list, list):
            eval_metric_list = [eval_metric_list]

        # Separate built-in from callable evaluation metrics
        eval_metrics_callable = [_EvalFunctionWrapper(f) for f in eval_metric_list if callable(f)]
        eval_metrics_builtin = [m for m in eval_metric_list if isinstance(m, str)]

        # concatenate metric from params (or default if not provided in params) and eval_metric
        params['metric'] = [params['metric']] if isinstance(params['metric'], (str, type(None))) else params['metric']
        params['metric'] = [e for e in eval_metrics_builtin if e not in params['metric']] + params['metric']
        params['metric'] = [metric for metric in params['metric'] if metric is not None]

        if not isinstance(X, (pd_DataFrame, dt_DataTable)):
            _X, _y = _LGBMCheckXY(X, y, accept_sparse=True, force_all_finite=False, ensure_min_samples=2)
            if sample_weight is not None:
                sample_weight = _LGBMCheckSampleWeight(sample_weight, _X)
        else:
            _X, _y = X, y

        if self._class_weight is None:
            self._class_weight = self.class_weight
        if self._class_weight is not None:
            class_sample_weight = _LGBMComputeSampleWeight(self._class_weight, y)
            if sample_weight is None or len(sample_weight) == 0:
                sample_weight = class_sample_weight
            else:
                sample_weight = np.multiply(sample_weight, class_sample_weight)

        self._n_features = _X.shape[1]
        # copy for consistency
        self._n_features_in = self._n_features

        def _construct_dataset(X, y, sample_weight, init_score, group, params,
                               categorical_feature='auto'):
            return Dataset(X, label=y, weight=sample_weight, group=group,
                           init_score=init_score, params=params,
                           categorical_feature=categorical_feature)

        train_set = _construct_dataset(_X, _y, sample_weight, init_score, group, params,
                                       categorical_feature=categorical_feature)

        valid_sets = []
        if eval_set is not None:

            def _get_meta_data(collection, name, i):
                if collection is None:
                    return None
                elif isinstance(collection, list):
                    return collection[i] if len(collection) > i else None
                elif isinstance(collection, dict):
                    return collection.get(i, None)
                else:
                    raise TypeError(f"{name} should be dict or list")

            if isinstance(eval_set, tuple):
                eval_set = [eval_set]
            for i, valid_data in enumerate(eval_set):
                # reuse train_set when the validation data is the training data, to avoid rebuilding an identical Dataset
                if valid_data[0] is X and valid_data[1] is y:
                    valid_set = train_set
                else:
                    valid_weight = _get_meta_data(eval_sample_weight, 'eval_sample_weight', i)
                    valid_class_weight = _get_meta_data(eval_class_weight, 'eval_class_weight', i)
                    if valid_class_weight is not None:
                        if isinstance(valid_class_weight, dict) and self._class_map is not None:
                            valid_class_weight = {self._class_map[k]: v for k, v in valid_class_weight.items()}
                        valid_class_sample_weight = _LGBMComputeSampleWeight(valid_class_weight, valid_data[1])
                        if valid_weight is None or len(valid_weight) == 0:
                            valid_weight = valid_class_sample_weight
                        else:
                            valid_weight = np.multiply(valid_weight, valid_class_sample_weight)
                    valid_init_score = _get_meta_data(eval_init_score, 'eval_init_score', i)
                    valid_group = _get_meta_data(eval_group, 'eval_group', i)
                    valid_set = _construct_dataset(valid_data[0], valid_data[1],
                                                   valid_weight, valid_init_score, valid_group, params)
                valid_sets.append(valid_set)

        if isinstance(init_model, LGBMModel):
            init_model = init_model.booster_

        if early_stopping_rounds is not None and early_stopping_rounds > 0:
            _log_warning("'early_stopping_rounds' argument is deprecated and will be removed in a future release of LightGBM. "
                         "Pass 'early_stopping()' callback via 'callbacks' argument instead.")
            params['early_stopping_rounds'] = early_stopping_rounds

        if callbacks is None:
            callbacks = []
        else:
            callbacks = copy.copy(callbacks)  # don't use deepcopy here to allow non-serializable objects

        evals_result = {}
        callbacks.append(record_evaluation(evals_result))

        self._Booster = train(
            params=params,
            train_set=train_set,
            num_boost_round=self.n_estimators,
            valid_sets=valid_sets,
            valid_names=eval_names,
            fobj=self._fobj,
            feval=eval_metrics_callable,
            init_model=init_model,
            feature_name=feature_name,
            callbacks=callbacks
        )

        if evals_result:
            self._evals_result = evals_result
        else:  # reset after previous call to fit()
            self._evals_result = None

        if self._Booster.best_iteration != 0:
            self._best_iteration = self._Booster.best_iteration
        else:  # reset after previous call to fit()
            self._best_iteration = None

        self._best_score = self._Booster.best_score

        self.fitted_ = True

        # free dataset
        self._Booster.free_dataset()
        del train_set, valid_sets
        return self

    fit.__doc__ = _lgbmmodel_doc_fit.format(
        X_shape="array-like or sparse matrix of shape = [n_samples, n_features]",
        y_shape="array-like of shape = [n_samples]",
        sample_weight_shape="array-like of shape = [n_samples] or None, optional (default=None)",
        init_score_shape="array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task) or shape = [n_samples, n_classes] (for multi-class task) or None, optional (default=None)",
        group_shape="array-like or None, optional (default=None)",
        eval_sample_weight_shape="list of array, or None, optional (default=None)",
        eval_init_score_shape="list of array, or None, optional (default=None)",
        eval_group_shape="list of array, or None, optional (default=None)"
    ) + "\n\n" + _lgbmmodel_doc_custom_eval_note

    def predict(self, X, raw_score=False, start_iteration=0, num_iteration=None,
                pred_leaf=False, pred_contrib=False, **kwargs):
        """Docstring is set after definition, using a template."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("Estimator not fitted, call fit before exploiting the model.")
        if not isinstance(X, (pd_DataFrame, dt_DataTable)):
            X = _LGBMCheckArray(X, accept_sparse=True, force_all_finite=False)
        n_features = X.shape[1]
        if self._n_features != n_features:
            raise ValueError("Number of features of the model must "
                             f"match the input. Model n_features_ is {self._n_features} and "
                             f"input n_features is {n_features}")
        # retrieve original params that could be used in both training and prediction
        # and then overwrite them (considering aliases) with params that were passed directly in prediction
        predict_params = self._process_params(stage="predict")
        for alias in _ConfigAliases.get_by_alias(
            "data",
            "X",
            "raw_score",
            "start_iteration",
            "num_iteration",
            "pred_leaf",
            "pred_contrib",
            *kwargs.keys()
        ):
            predict_params.pop(alias, None)
        predict_params.update(kwargs)
        return self._Booster.predict(X, raw_score=raw_score, start_iteration=start_iteration, num_iteration=num_iteration,
                                     pred_leaf=pred_leaf, pred_contrib=pred_contrib, **predict_params)

    predict.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted value for each sample.",
        X_shape="array-like or sparse matrix of shape = [n_samples, n_features]",
        output_name="predicted_result",
        predicted_result_shape="array-like of shape = [n_samples] or shape = [n_samples, n_classes]",
        X_leaves_shape="array-like of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]",
        X_SHAP_values_shape="array-like of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes] or list with n_classes length of such objects"
    )

    @property
    def n_features_(self):
        """:obj:`int`: The number of features of fitted model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No n_features found. Need to call fit beforehand.')
        return self._n_features

    @property
    def n_features_in_(self):
        """:obj:`int`: The number of features of fitted model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No n_features_in found. Need to call fit beforehand.')
        return self._n_features_in

    @property
    def best_score_(self):
        """:obj:`dict`: The best score of fitted model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No best_score found. Need to call fit beforehand.')
        return self._best_score

    @property
    def best_iteration_(self):
        """:obj:`int` or :obj:`None`: The best iteration of fitted model if ``early_stopping()`` callback has been specified."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No best_iteration found. Need to call fit with early_stopping callback beforehand.')
        return self._best_iteration

    @property
    def objective_(self):
        """:obj:`str` or :obj:`callable`: The concrete objective used while fitting this model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No objective found. Need to call fit beforehand.')
        return self._objective

    @property
    def n_estimators_(self) -> int:
        """:obj:`int`: True number of boosting iterations performed.

        This might be less than parameter ``n_estimators`` if early stopping was enabled or
        if boosting stopped early due to limits on complexity like ``min_gain_to_split``.
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No n_estimators found. Need to call fit beforehand.')
        return self._Booster.current_iteration()  # type: ignore

    @property
    def n_iter_(self) -> int:
        """:obj:`int`: True number of boosting iterations performed.

        This might be less than parameter ``n_estimators`` if early stopping was enabled or
        if boosting stopped early due to limits on complexity like ``min_gain_to_split``.
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No n_iter found. Need to call fit beforehand.')
        return self._Booster.current_iteration()  # type: ignore

    @property
    def booster_(self):
        """Booster: The underlying Booster of this model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No booster found. Need to call fit beforehand.')
        return self._Booster

    @property
    def evals_result_(self):
        """:obj:`dict` or :obj:`None`: The evaluation results if validation sets have been specified."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No results found. Need to call fit with eval_set beforehand.')
        return self._evals_result

    @property
    def feature_importances_(self):
        """:obj:`array` of shape = [n_features]: The feature importances (the higher, the more important).

        .. note::

            ``importance_type`` attribute is passed to the function
            to configure the type of importance values to be extracted.
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No feature_importances found. Need to call fit beforehand.')
        return self._Booster.feature_importance(importance_type=self.importance_type)

    @property
    def feature_name_(self):
        """:obj:`array` of shape = [n_features]: The names of features."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No feature_name found. Need to call fit beforehand.')
        return self._Booster.feature_name()


class LGBMRegressor(_LGBMRegressorBase, LGBMModel):
    """LightGBM regressor."""

    def fit(self, X, y,
            sample_weight=None, init_score=None,
            eval_set=None, eval_names=None, eval_sample_weight=None,
            eval_init_score=None, eval_metric=None, early_stopping_rounds=None,
            feature_name='auto', categorical_feature='auto',
            callbacks=None, init_model=None):
        """Docstring is inherited from the LGBMModel."""
        super().fit(X, y, sample_weight=sample_weight, init_score=init_score,
                    eval_set=eval_set, eval_names=eval_names, eval_sample_weight=eval_sample_weight,
                    eval_init_score=eval_init_score, eval_metric=eval_metric,
                    early_stopping_rounds=early_stopping_rounds, feature_name=feature_name,
                    categorical_feature=categorical_feature, callbacks=callbacks, init_model=init_model)
        return self

    _base_doc = LGBMModel.fit.__doc__.replace("self : LGBMModel", "self : LGBMRegressor")  # type: ignore
    _base_doc = (_base_doc[:_base_doc.find('group :')]  # type: ignore
                 + _base_doc[_base_doc.find('eval_set :'):])  # type: ignore
    _base_doc = (_base_doc[:_base_doc.find('eval_class_weight :')]
                 + _base_doc[_base_doc.find('eval_init_score :'):])
    fit.__doc__ = (_base_doc[:_base_doc.find('eval_group :')]
                   + _base_doc[_base_doc.find('eval_metric :'):])


class LGBMClassifier(_LGBMClassifierBase, LGBMModel):
    """LightGBM classifier."""

    def fit(self, X, y,
            sample_weight=None, init_score=None,
            eval_set=None, eval_names=None, eval_sample_weight=None,
            eval_class_weight=None, eval_init_score=None, eval_metric=None,
            early_stopping_rounds=None,
            feature_name='auto', categorical_feature='auto',
            callbacks=None, init_model=None):
        """Docstring is inherited from the LGBMModel."""
        _LGBMAssertAllFinite(y)
        _LGBMCheckClassificationTargets(y)
        self._le = _LGBMLabelEncoder().fit(y)
        _y = self._le.transform(y)
        self._class_map = dict(zip(self._le.classes_, self._le.transform(self._le.classes_)))
        if isinstance(self.class_weight, dict):
            self._class_weight = {self._class_map[k]: v for k, v in self.class_weight.items()}

        self._classes = self._le.classes_
        self._n_classes = len(self._classes)

        if not callable(eval_metric):
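            # map metric aliases so the requested metric matches the detected
            # number of classes (binary vs multiclass)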
            if isinstance(eval_metric, (str, type(None))):
                eval_metric = [eval_metric]
            if self._n_classes > 2:
                for index, metric in enumerate(eval_metric):
                    if metric in {'logloss', 'binary_logloss'}:
                        eval_metric[index] = "multi_logloss"
                    elif metric in {'error', 'binary_error'}:
                        eval_metric[index] = "multi_error"
            else:
                for index, metric in enumerate(eval_metric):
                    if metric in {'logloss', 'multi_logloss'}:
                        eval_metric[index] = 'binary_logloss'
                    elif metric in {'error', 'multi_error'}:
                        eval_metric[index] = 'binary_error'

        # do not modify args, as it causes errors in model selection tools
        valid_sets = None
        if eval_set is not None:
            if isinstance(eval_set, tuple):
                eval_set = [eval_set]
            valid_sets = [None] * len(eval_set)
            for i, (valid_x, valid_y) in enumerate(eval_set):
                if valid_x is X and valid_y is y:
                    valid_sets[i] = (valid_x, _y)
                else:
                    valid_sets[i] = (valid_x, self._le.transform(valid_y))

        super().fit(X, _y, sample_weight=sample_weight, init_score=init_score, eval_set=valid_sets,
                    eval_names=eval_names, eval_sample_weight=eval_sample_weight,
                    eval_class_weight=eval_class_weight, eval_init_score=eval_init_score,
                    eval_metric=eval_metric, early_stopping_rounds=early_stopping_rounds,
                    feature_name=feature_name, categorical_feature=categorical_feature,
                    callbacks=callbacks, init_model=init_model)
        return self

    _base_doc = LGBMModel.fit.__doc__.replace("self : LGBMModel", "self : LGBMClassifier")  # type: ignore
    _base_doc = (_base_doc[:_base_doc.find('group :')]  # type: ignore
                 + _base_doc[_base_doc.find('eval_set :'):])  # type: ignore
    fit.__doc__ = (_base_doc[:_base_doc.find('eval_group :')]
                   + _base_doc[_base_doc.find('eval_metric :'):])

    def predict(self, X, raw_score=False, start_iteration=0, num_iteration=None,
                pred_leaf=False, pred_contrib=False, **kwargs):
        """Docstring is inherited from the LGBMModel."""
        result = self.predict_proba(X, raw_score, start_iteration, num_iteration,
                                    pred_leaf, pred_contrib, **kwargs)
        if callable(self._objective) or raw_score or pred_leaf or pred_contrib:
            return result
        else:
            class_index = np.argmax(result, axis=1)
            return self._le.inverse_transform(class_index)

    predict.__doc__ = LGBMModel.predict.__doc__

    def predict_proba(self, X, raw_score=False, start_iteration=0, num_iteration=None,
                      pred_leaf=False, pred_contrib=False, **kwargs):
        """Docstring is set after definition, using a template."""
        result = super().predict(X, raw_score, start_iteration, num_iteration, pred_leaf, pred_contrib, **kwargs)
        if callable(self._objective) and not (raw_score or pred_leaf or pred_contrib):
            _log_warning("Cannot compute class probabilities or labels "
                         "due to the usage of customized objective function.\n"
                         "Returning raw scores instead.")
            return result
        elif self._n_classes > 2 or raw_score or pred_leaf or pred_contrib:
            return result
        else:
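            # binary task: the booster returns P(positive class); build the
            # two-column [P(class 0), P(class 1)] array scikit-learn expects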
            return np.vstack((1. - result, result)).transpose()

    predict_proba.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted probability for each class for each sample.",
        X_shape="array-like or sparse matrix of shape = [n_samples, n_features]",
        output_name="predicted_probability",
        predicted_result_shape="array-like of shape = [n_samples] or shape = [n_samples, n_classes]",
        X_leaves_shape="array-like of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]",
        X_SHAP_values_shape="array-like of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes] or list with n_classes length of such objects"
    )

    @property
    def classes_(self):
        """:obj:`array` of shape = [n_classes]: The class label array."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No classes found. Need to call fit beforehand.')
        return self._classes

    @property
    def n_classes_(self):
        """:obj:`int`: The number of classes."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No classes found. Need to call fit beforehand.')
        return self._n_classes


class LGBMRanker(LGBMModel):
    """LightGBM ranker.

    .. warning::

        scikit-learn doesn't support ranking applications yet,
        therefore this class is not really compatible with the sklearn ecosystem.
        Please use this class mainly for training and applying ranking models in common sklearnish way.
    """

    def fit(self, X, y,
            sample_weight=None, init_score=None, group=None,
            eval_set=None, eval_names=None, eval_sample_weight=None,
            eval_init_score=None, eval_group=None, eval_metric=None,
            eval_at=(1, 2, 3, 4, 5), early_stopping_rounds=None,
            feature_name='auto', categorical_feature='auto',
            callbacks=None, init_model=None):
        """Docstring is inherited from the LGBMModel."""
        # check group data
        if group is None:
            raise ValueError("Should set group for ranking task")

        if eval_set is not None:
            if eval_group is None:
                raise ValueError("Eval_group cannot be None when eval_set is not None")
            elif len(eval_group) != len(eval_set):
                raise ValueError("Length of eval_group should be equal to length of eval_set")
            elif (isinstance(eval_group, dict)
                  and any(i not in eval_group or eval_group[i] is None for i in range(len(eval_group)))
                  or isinstance(eval_group, list)
                  and any(group is None for group in eval_group)):
                raise ValueError("Should set group for all eval datasets for ranking task; "
                                 "if you use dict, the index should start from 0")

        self._eval_at = eval_at
        super().fit(X, y, sample_weight=sample_weight, init_score=init_score, group=group,
                    eval_set=eval_set, eval_names=eval_names, eval_sample_weight=eval_sample_weight,
                    eval_init_score=eval_init_score, eval_group=eval_group, eval_metric=eval_metric,
                    early_stopping_rounds=early_stopping_rounds, feature_name=feature_name,
                    categorical_feature=categorical_feature, callbacks=callbacks, init_model=init_model)
        return self

    _base_doc = LGBMModel.fit.__doc__.replace("self : LGBMModel", "self : LGBMRanker")  # type: ignore
    fit.__doc__ = (_base_doc[:_base_doc.find('eval_class_weight :')]  # type: ignore
                   + _base_doc[_base_doc.find('eval_init_score :'):])  # type: ignore
    _base_doc = fit.__doc__
    _before_early_stop, _early_stop, _after_early_stop = _base_doc.partition('early_stopping_rounds :')
    fit.__doc__ = f"""{_before_early_stop}eval_at : iterable of int, optional (default=(1, 2, 3, 4, 5))
        The evaluation positions of the specified metric.
    {_early_stop}{_after_early_stop}"""
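

# --- Usage sketch (illustrative; not part of the public API) ----------------
# LGBMRanker requires per-query group sizes, as described in ``fit`` above.
# Synthetic data: two queries of five documents each with integer relevance;
# ``_example_ranker_usage`` is a hypothetical helper, not an exported name.
def _example_ranker_usage():
    rng = np.random.default_rng(0)
    X = rng.normal(size=(10, 3))
    y = rng.integers(0, 4, size=10)  # relevance labels in [0, 3]
    ranker = LGBMRanker(n_estimators=5)
    ranker.fit(X, y, group=[5, 5],
               eval_set=[(X, y)], eval_group=[[5, 5]], eval_at=[1, 3])
    return ranker.predict(X)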