# coding: utf-8
"""Scikit-learn wrapper interface for LightGBM."""
import copy
from inspect import signature
from typing import Callable, Dict, Optional, Union

import numpy as np

from .basic import Dataset, LightGBMError, _choose_param_value, _ConfigAliases, _log_warning
from .callback import log_evaluation, record_evaluation
from .compat import (SKLEARN_INSTALLED, LGBMNotFittedError, _LGBMAssertAllFinite, _LGBMCheckArray,
                     _LGBMCheckClassificationTargets, _LGBMCheckSampleWeight, _LGBMCheckXY, _LGBMClassifierBase,
                     _LGBMComputeSampleWeight, _LGBMLabelEncoder, _LGBMModelBase, _LGBMRegressorBase, dt_DataTable,
                     pd_DataFrame)
from .engine import train


class _ObjectiveFunctionWrapper:
    """Proxy class for objective function."""

    def __init__(self, func):
        """Construct a proxy class.

        This class transforms an objective function into one with the signature ``new_func(preds, dataset)``,
        as expected by ``lightgbm.engine.train``.

        Parameters
        ----------
        func : callable
            Expects a callable with signature ``func(y_true, y_pred)`` or ``func(y_true, y_pred, group)``
            and returns (grad, hess):

                y_true : array-like of shape = [n_samples]
                    The target values.
                y_pred : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
                    The predicted values.
                    Predicted values are returned before any transformation,
                    e.g. they are raw margin instead of probability of positive class for binary task.
                group : array-like
                    Group/query data.
                    Only used in the learning-to-rank task.
                    sum(group) = n_samples.
                    For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
                    where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
                grad : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
                    The value of the first order derivative (gradient) of the loss
                    with respect to the elements of y_pred for each sample point.
                hess : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
                    The value of the second order derivative (Hessian) of the loss
                    with respect to the elements of y_pred for each sample point.

        .. note::

            For the multi-class task, y_pred is grouped by class_id first, then by row_id.
            To get the y_pred of the i-th row for the j-th class, access y_pred[j * num_data + i];
            grad and hess should be grouped in the same way.
        """
        self.func = func

    def __call__(self, preds, dataset):
        """Call passed function with appropriate arguments.

        Parameters
        ----------
        preds : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
            The predicted values.
        dataset : Dataset
            The training dataset.

        Returns
        -------
        grad : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
            The value of the first order derivative (gradient) of the loss
            with respect to the elements of preds for each sample point.
        hess : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
            The value of the second order derivative (Hessian) of the loss
            with respect to the elements of preds for each sample point.
        """
        labels = dataset.get_label()
        argc = len(signature(self.func).parameters)
        if argc == 2:
            grad, hess = self.func(labels, preds)
        elif argc == 3:
            grad, hess = self.func(labels, preds, dataset.get_group())
        else:
            raise TypeError(f"Self-defined objective function should have 2 or 3 arguments, got {argc}")
        """weighted for objective"""
        weight = dataset.get_weight()
        if weight is not None:
            """only one class"""
            if len(weight) == len(grad):
                grad = np.multiply(grad, weight)
                hess = np.multiply(hess, weight)
            else:
                num_data = len(weight)
                num_class = len(grad) // num_data
                if num_class * num_data != len(grad):
                    raise ValueError("Length of grad and hess should be equal to num_class * num_data")
                for k in range(num_class):
                    for i in range(num_data):
                        idx = k * num_data + i
                        grad[idx] *= weight[i]
                        hess[idx] *= weight[i]
        return grad, hess
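

# Illustrative sketch (not part of LightGBM's API): a custom objective with the
# ``func(y_true, y_pred)`` signature that _ObjectiveFunctionWrapper adapts.
# For squared-error loss 0.5 * (y_pred - y_true) ** 2 the gradient is
# (y_pred - y_true) and the Hessian is 1 for every sample.
def _example_l2_objective(y_true, y_pred):
    """Example only: return (grad, hess) of the squared-error loss."""
    grad = np.asarray(y_pred) - np.asarray(y_true)
    hess = np.ones_like(grad)
    return grad, hess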


class _EvalFunctionWrapper:
    """Proxy class for evaluation function."""

    def __init__(self, func):
        """Construct a proxy class.

        This class transforms an evaluation function into one with the signature ``new_func(preds, dataset)``,
        as expected by ``lightgbm.engine.train``.

        Parameters
        ----------
        func : callable
            Expects a callable with following signatures:
            ``func(y_true, y_pred)``,
            ``func(y_true, y_pred, weight)``
            or ``func(y_true, y_pred, weight, group)``
            and returns (eval_name, eval_result, is_higher_better) or
            list of (eval_name, eval_result, is_higher_better):

                y_true : array-like of shape = [n_samples]
                    The target values.
                y_pred : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
                    The predicted values.
                    In case of custom ``objective``, predicted values are returned before any transformation,
                    e.g. they are raw margin instead of probability of positive class for binary task in this case.
                weight : array-like of shape = [n_samples]
                    The weight of samples.
                group : array-like
                    Group/query data.
                    Only used in the learning-to-rank task.
                    sum(group) = n_samples.
                    For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
                    where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
                eval_name : str
                    The name of the evaluation function (without whitespace).
                eval_result : float
                    The eval result.
                is_higher_better : bool
                    Whether the eval result is better when it is higher, e.g. AUC is ``is_higher_better``.

        .. note::

            For the multi-class task, y_pred is grouped by class_id first, then by row_id.
            To get the y_pred of the i-th row for the j-th class, access y_pred[j * num_data + i].
        """
        self.func = func

    def __call__(self, preds, dataset):
        """Call passed function with appropriate arguments.

        Parameters
        ----------
        preds : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
            The predicted values.
        dataset : Dataset
            The training dataset.

        Returns
        -------
        eval_name : str
            The name of the evaluation function (without whitespace).
        eval_result : float
            The eval result.
        is_higher_better : bool
            Whether the eval result is better when it is higher, e.g. AUC is ``is_higher_better``.
        """
        labels = dataset.get_label()
        argc = len(signature(self.func).parameters)
        if argc == 2:
            return self.func(labels, preds)
        elif argc == 3:
            return self.func(labels, preds, dataset.get_weight())
        elif argc == 4:
            return self.func(labels, preds, dataset.get_weight(), dataset.get_group())
        else:
            raise TypeError(f"Self-defined eval function should have 2, 3 or 4 arguments, got {argc}")
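

# Illustrative sketch (not part of LightGBM's API): a custom metric with the
# ``func(y_true, y_pred)`` signature that _EvalFunctionWrapper adapts,
# returning the expected (eval_name, eval_result, is_higher_better) tuple.
def _example_rmse_metric(y_true, y_pred):
    """Example only: root mean squared error, where lower is better."""
    rmse = float(np.sqrt(np.mean((np.asarray(y_pred) - np.asarray(y_true)) ** 2)))
    return 'example_rmse', rmse, False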


# documentation templates for LGBMModel methods are shared between the classes in
# this module and those in the ``dask`` module

_lgbmmodel_doc_fit = (
    """
    Build a gradient boosting model from the training set (X, y).

    Parameters
    ----------
    X : {X_shape}
        Input feature matrix.
    y : {y_shape}
        The target values (class labels in classification, real numbers in regression).
    sample_weight : {sample_weight_shape}
        Weights of training data.
    init_score : {init_score_shape}
        Init score of training data.
    group : {group_shape}
        Group/query data.
        Only used in the learning-to-rank task.
        sum(group) = n_samples.
        For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
        where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
    eval_set : list or None, optional (default=None)
        A list of (X, y) tuple pairs to use as validation sets.
    eval_names : list of str, or None, optional (default=None)
        Names of eval_set.
    eval_sample_weight : {eval_sample_weight_shape}
        Weights of eval data.
    eval_class_weight : list or None, optional (default=None)
        Class weights of eval data.
    eval_init_score : {eval_init_score_shape}
        Init score of eval data.
    eval_group : {eval_group_shape}
        Group data of eval data.
    eval_metric : str, callable, list or None, optional (default=None)
        If str, it should be a built-in evaluation metric to use.
        If callable, it should be a custom evaluation metric, see note below for more details.
        If list, it can be a list of built-in metrics, a list of custom evaluation metrics, or a mix of both.
        In either case, the ``metric`` from the model parameters will be evaluated and used as well.
        Default: 'l2' for LGBMRegressor, 'logloss' for LGBMClassifier, 'ndcg' for LGBMRanker.
    early_stopping_rounds : int or None, optional (default=None)
        Activates early stopping. The model will train until the validation score stops improving.
        Validation score needs to improve at least every ``early_stopping_rounds`` round(s)
        to continue training.
        Requires at least one validation data and one metric.
        If there's more than one, will check all of them. But the training data is ignored anyway.
        To check only the first metric, set the ``first_metric_only`` parameter to ``True``
        in additional parameters ``**kwargs`` of the model constructor.
    verbose : bool or int, optional (default=True)
        Requires at least one evaluation data.
        If True, the eval metric on the eval set is printed at each boosting stage.
        If int, the eval metric on the eval set is printed at every ``verbose`` boosting stage.
        The last boosting stage or the boosting stage found by using ``early_stopping_rounds`` is also printed.

        .. rubric:: Example

        With ``verbose`` = 4 and at least one item in ``eval_set``,
        an evaluation metric is printed every 4 (instead of 1) boosting stages.

    feature_name : list of str, or 'auto', optional (default='auto')
        Feature names.
        If 'auto' and data is pandas DataFrame, data columns names are used.
    categorical_feature : list of str or int, or 'auto', optional (default='auto')
        Categorical features.
        If list of int, interpreted as indices.
        If list of str, interpreted as feature names (need to specify ``feature_name`` as well).
        If 'auto' and data is pandas DataFrame, pandas unordered categorical columns are used.
        All values in categorical features should be less than int32 max value (2147483647).
        Large values could be memory consuming. Consider using consecutive integers starting from zero.
        All negative values in categorical features will be treated as missing values.
        The output cannot be monotonically constrained with respect to a categorical feature.
    callbacks : list of callable, or None, optional (default=None)
        List of callback functions that are applied at each iteration.
        See Callbacks in Python API for more information.
    init_model : str, pathlib.Path, Booster, LGBMModel or None, optional (default=None)
        Filename of LightGBM model, Booster instance or LGBMModel instance used to continue training.

    Returns
    -------
    self : LGBMModel
        Returns self.
    """
)

_lgbmmodel_doc_custom_eval_note = """
    Note
    ----
    Custom eval function expects a callable with following signatures:
    ``func(y_true, y_pred)``, ``func(y_true, y_pred, weight)`` or
    ``func(y_true, y_pred, weight, group)``
    and returns (eval_name, eval_result, is_higher_better) or
    list of (eval_name, eval_result, is_higher_better):

        y_true : array-like of shape = [n_samples]
            The target values.
        y_pred : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
            The predicted values.
            In case of custom ``objective``, predicted values are returned before any transformation,
            e.g. they are raw margin instead of probability of positive class for binary task in this case.
        weight : array-like of shape = [n_samples]
            The weight of samples.
        group : array-like
            Group/query data.
            Only used in the learning-to-rank task.
            sum(group) = n_samples.
            For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
            where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
        eval_name : str
            The name of the evaluation function (without whitespace).
        eval_result : float
            The eval result.
        is_higher_better : bool
            Whether the eval result is better when it is higher, e.g. AUC is ``is_higher_better``.

    For the multi-class task, y_pred is grouped by class_id first, then by row_id.
    To get the y_pred of the i-th row for the j-th class, access y_pred[j * num_data + i].
"""

_lgbmmodel_doc_predict = (
    """
    {description}

    Parameters
    ----------
    X : {X_shape}
        Input features matrix.
    raw_score : bool, optional (default=False)
        Whether to predict raw scores.
    start_iteration : int, optional (default=0)
        Start index of the iteration to predict.
        If <= 0, starts from the first iteration.
    num_iteration : int or None, optional (default=None)
        Total number of iterations used in the prediction.
        If None, if the best iteration exists and start_iteration <= 0, the best iteration is used;
        otherwise, all iterations from ``start_iteration`` are used (no limits).
        If <= 0, all iterations from ``start_iteration`` are used (no limits).
    pred_leaf : bool, optional (default=False)
        Whether to predict leaf index.
    pred_contrib : bool, optional (default=False)
        Whether to predict feature contributions.

        .. note::

            If you want to get more explanations for your model's predictions using SHAP values,
            like SHAP interaction values,
            you can install the shap package (https://github.com/slundberg/shap).
            Note that unlike the shap package, with ``pred_contrib`` we return a matrix with an extra
            column, where the last column is the expected value.

    **kwargs
        Other parameters for the prediction.

    Returns
    -------
    {output_name} : {predicted_result_shape}
        The predicted values.
    X_leaves : {X_leaves_shape}
        If ``pred_leaf=True``, the predicted leaf of every tree for each sample.
    X_SHAP_values : {X_SHAP_values_shape}
        If ``pred_contrib=True``, the feature contributions for each sample.
    """
)
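

# Illustrative helper (assumption, not part of LightGBM's API): as documented
# above, ``pred_contrib=True`` returns one extra column whose last entry per
# row is the expected value, so for a single-output model the per-feature
# contributions plus that base value sum to the raw prediction.
def _example_split_pred_contrib(model, X):
    """Example only: separate per-feature contributions from the base value."""
    contribs = model.predict(X, pred_contrib=True)
    return contribs[:, :-1], contribs[:, -1]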


class LGBMModel(_LGBMModelBase):
    """Implementation of the scikit-learn API for LightGBM."""

    def __init__(
        self,
        boosting_type: str = 'gbdt',
        num_leaves: int = 31,
        max_depth: int = -1,
        learning_rate: float = 0.1,
        n_estimators: int = 100,
        subsample_for_bin: int = 200000,
        objective: Optional[Union[str, Callable]] = None,
        class_weight: Optional[Union[Dict, str]] = None,
        min_split_gain: float = 0.,
        min_child_weight: float = 1e-3,
        min_child_samples: int = 20,
        subsample: float = 1.,
        subsample_freq: int = 0,
        colsample_bytree: float = 1.,
        reg_alpha: float = 0.,
        reg_lambda: float = 0.,
        random_state: Optional[Union[int, np.random.RandomState]] = None,
        n_jobs: int = -1,
        silent: Union[bool, str] = 'warn',
        importance_type: str = 'split',
        **kwargs
    ):
        r"""Construct a gradient boosting model.

        Parameters
        ----------
        boosting_type : str, optional (default='gbdt')
            'gbdt', traditional Gradient Boosting Decision Tree.
            'dart', Dropouts meet Multiple Additive Regression Trees.
            'goss', Gradient-based One-Side Sampling.
            'rf', Random Forest.
        num_leaves : int, optional (default=31)
            Maximum tree leaves for base learners.
        max_depth : int, optional (default=-1)
            Maximum tree depth for base learners, <=0 means no limit.
        learning_rate : float, optional (default=0.1)
            Boosting learning rate.
            You can use the ``callbacks`` parameter of the ``fit`` method to shrink/adapt the learning rate
            during training using the ``reset_parameter`` callback.
            Note that this will ignore the ``learning_rate`` argument in training.
        n_estimators : int, optional (default=100)
            Number of boosted trees to fit.
        subsample_for_bin : int, optional (default=200000)
            Number of samples for constructing bins.
        objective : str, callable or None, optional (default=None)
            Specify the learning task and the corresponding learning objective or
            a custom objective function to be used (see note below).
            Default: 'regression' for LGBMRegressor, 'binary' or 'multiclass' for LGBMClassifier, 'lambdarank' for LGBMRanker.
        class_weight : dict, 'balanced' or None, optional (default=None)
            Weights associated with classes in the form ``{class_label: weight}``.
            Use this parameter only for multi-class classification task;
            for binary classification task you may use ``is_unbalance`` or ``scale_pos_weight`` parameters.
            Note that the use of all these parameters will result in poor estimates of the individual class probabilities.
            You may want to consider performing probability calibration
            (https://scikit-learn.org/stable/modules/calibration.html) of your model.
            The 'balanced' mode uses the values of y to automatically adjust weights
            inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))``.
            If None, all classes are supposed to have weight one.
            Note that these weights will be multiplied with ``sample_weight`` (passed through the ``fit`` method)
            if ``sample_weight`` is specified.
        min_split_gain : float, optional (default=0.)
            Minimum loss reduction required to make a further partition on a leaf node of the tree.
        min_child_weight : float, optional (default=1e-3)
            Minimum sum of instance weight (hessian) needed in a child (leaf).
        min_child_samples : int, optional (default=20)
            Minimum number of data needed in a child (leaf).
        subsample : float, optional (default=1.)
            Subsample ratio of the training instance.
        subsample_freq : int, optional (default=0)
            Frequency of subsample; <=0 disables subsampling.
        colsample_bytree : float, optional (default=1.)
            Subsample ratio of columns when constructing each tree.
        reg_alpha : float, optional (default=0.)
            L1 regularization term on weights.
        reg_lambda : float, optional (default=0.)
            L2 regularization term on weights.
        random_state : int, RandomState object or None, optional (default=None)
            Random number seed.
            If int, this number is used to seed the C++ code.
            If RandomState object (numpy), a random integer is picked based on its state to seed the C++ code.
            If None, default seeds in C++ code are used.
        n_jobs : int, optional (default=-1)
            Number of parallel threads to use for training (can be changed at prediction time).
        silent : bool, optional (default=True)
            Whether to print messages while running boosting.
        importance_type : str, optional (default='split')
            The type of feature importance to be filled into ``feature_importances_``.
            If 'split', result contains numbers of times the feature is used in a model.
            If 'gain', result contains total gains of splits which use the feature.
        **kwargs
            Other parameters for the model.
            Check http://lightgbm.readthedocs.io/en/latest/Parameters.html for more parameters.

            .. warning::

                \*\*kwargs is not supported in sklearn, it may cause unexpected issues.

        Note
        ----
        A custom objective function can be provided for the ``objective`` parameter.
        In this case, it should have the signature
        ``objective(y_true, y_pred) -> grad, hess`` or
        ``objective(y_true, y_pred, group) -> grad, hess``:

            y_true : array-like of shape = [n_samples]
                The target values.
            y_pred : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
                The predicted values.
                Predicted values are returned before any transformation,
                e.g. they are raw margin instead of probability of positive class for binary task.
            group : array-like
                Group/query data.
                Only used in the learning-to-rank task.
                sum(group) = n_samples.
                For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
                where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
            grad : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
                The value of the first order derivative (gradient) of the loss
                with respect to the elements of y_pred for each sample point.
            hess : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
                The value of the second order derivative (Hessian) of the loss
                with respect to the elements of y_pred for each sample point.

        For the multi-class task, y_pred is grouped by class_id first, then by row_id.
        To get the y_pred of the i-th row for the j-th class, access y_pred[j * num_data + i];
        grad and hess should be grouped in the same way.
        """
        if not SKLEARN_INSTALLED:
            raise LightGBMError('scikit-learn is required for lightgbm.sklearn. '
                                'You must install scikit-learn and restart your session to use this module.')

        self.boosting_type = boosting_type
        self.objective = objective
        self.num_leaves = num_leaves
        self.max_depth = max_depth
        self.learning_rate = learning_rate
        self.n_estimators = n_estimators
        self.subsample_for_bin = subsample_for_bin
        self.min_split_gain = min_split_gain
        self.min_child_weight = min_child_weight
        self.min_child_samples = min_child_samples
        self.subsample = subsample
        self.subsample_freq = subsample_freq
        self.colsample_bytree = colsample_bytree
        self.reg_alpha = reg_alpha
        self.reg_lambda = reg_lambda
        self.random_state = random_state
        self.n_jobs = n_jobs
        self.silent = silent
        self.importance_type = importance_type
        self._Booster = None
        self._evals_result = None
        self._best_score = None
        self._best_iteration = None
        self._other_params = {}
        self._objective = objective
        self.class_weight = class_weight
        self._class_weight = None
        self._class_map = None
        self._n_features = None
        self._n_features_in = None
        self._classes = None
        self._n_classes = None
        self.set_params(**kwargs)

    def _more_tags(self):
        return {
            'allow_nan': True,
            'X_types': ['2darray', 'sparse', '1dlabels'],
            '_xfail_checks': {
                'check_no_attributes_set_in_init':
                'scikit-learn incorrectly asserts that private attributes '
                'cannot be set in __init__: '
                '(see https://github.com/microsoft/LightGBM/issues/2628)'
            }
        }

    def __sklearn_is_fitted__(self) -> bool:
        return getattr(self, "fitted_", False)

    def get_params(self, deep=True):
        """Get parameters for this estimator.

        Parameters
        ----------
        deep : bool, optional (default=True)
            If True, will return the parameters for this estimator and
            contained subobjects that are estimators.

        Returns
        -------
        params : dict
            Parameter names mapped to their values.
        """
        params = super().get_params(deep=deep)
        params.update(self._other_params)
        return params

    def set_params(self, **params):
        """Set the parameters of this estimator.

        Parameters
        ----------
        **params
            Parameter names with their new values.

        Returns
        -------
        self : object
            Returns self.
        """
        for key, value in params.items():
            setattr(self, key, value)
            if hasattr(self, f"_{key}"):
                setattr(self, f"_{key}", value)
            self._other_params[key] = value
        return self

    def fit(self, X, y,
            sample_weight=None, init_score=None, group=None,
            eval_set=None, eval_names=None, eval_sample_weight=None,
            eval_class_weight=None, eval_init_score=None, eval_group=None,
            eval_metric=None, early_stopping_rounds=None, verbose='warn',
            feature_name='auto', categorical_feature='auto',
            callbacks=None, init_model=None):
        """Docstring is set after definition, using a template."""
        params = self.get_params()

        params.pop('objective', None)
        for alias in _ConfigAliases.get('objective'):
            if alias in params:
                self._objective = params.pop(alias)
                _log_warning(f"Found '{alias}' in params. Will use it instead of 'objective' argument")
        if self._objective is None:
            if isinstance(self, LGBMRegressor):
                self._objective = "regression"
            elif isinstance(self, LGBMClassifier):
                if self._n_classes > 2:
                    self._objective = "multiclass"
                else:
                    self._objective = "binary"
            elif isinstance(self, LGBMRanker):
                self._objective = "lambdarank"
            else:
                raise ValueError("Unknown LGBMModel type.")
        if callable(self._objective):
            self._fobj = _ObjectiveFunctionWrapper(self._objective)
            params['objective'] = 'None'  # objective = nullptr for unknown objective
        else:
            self._fobj = None
            params['objective'] = self._objective

        # user can set verbose with kwargs, it has higher priority
        if self.silent != "warn":
            _log_warning("'silent' argument is deprecated and will be removed in a future release of LightGBM. "
                         "Pass 'verbose' parameter via keyword arguments instead.")
            silent = self.silent
        else:
            silent = True
        if not any(verbose_alias in params for verbose_alias in _ConfigAliases.get("verbosity")) and silent:
            params['verbose'] = -1
        params.pop('silent', None)

        params.pop('importance_type', None)
        params.pop('n_estimators', None)
        params.pop('class_weight', None)

        if isinstance(params['random_state'], np.random.RandomState):
            params['random_state'] = params['random_state'].randint(np.iinfo(np.int32).max)
        if self._n_classes is not None and self._n_classes > 2:
            for alias in _ConfigAliases.get('num_class'):
                params.pop(alias, None)
            params['num_class'] = self._n_classes
        if hasattr(self, '_eval_at'):
            eval_at = self._eval_at
            for alias in _ConfigAliases.get('eval_at'):
                if alias in params:
                    _log_warning(f"Found '{alias}' in params. Will use it instead of 'eval_at' argument")
                    eval_at = params.pop(alias)
            params['eval_at'] = eval_at

        # Do not modify original args in fit function
        # Refer to https://github.com/microsoft/LightGBM/pull/2619
        eval_metric_list = copy.deepcopy(eval_metric)
        if not isinstance(eval_metric_list, list):
            eval_metric_list = [eval_metric_list]

        # Separate built-in from callable evaluation metrics
        eval_metrics_callable = [_EvalFunctionWrapper(f) for f in eval_metric_list if callable(f)]
        eval_metrics_builtin = [m for m in eval_metric_list if isinstance(m, str)]

        # register default metric for consistency with callable eval_metric case
        original_metric = self._objective if isinstance(self._objective, str) else None
        if original_metric is None:
            # try to deduce from class instance
            if isinstance(self, LGBMRegressor):
                original_metric = "l2"
            elif isinstance(self, LGBMClassifier):
                original_metric = "multi_logloss" if self._n_classes > 2 else "binary_logloss"
            elif isinstance(self, LGBMRanker):
                original_metric = "ndcg"

        # overwrite default metric by explicitly set metric
        params = _choose_param_value("metric", params, original_metric)

        # concatenate metric from params (or default if not provided in params) and eval_metric
        params['metric'] = [params['metric']] if isinstance(params['metric'], (str, type(None))) else params['metric']
        params['metric'] = [e for e in eval_metrics_builtin if e not in params['metric']] + params['metric']
        params['metric'] = [metric for metric in params['metric'] if metric is not None]

        if not isinstance(X, (pd_DataFrame, dt_DataTable)):
            _X, _y = _LGBMCheckXY(X, y, accept_sparse=True, force_all_finite=False, ensure_min_samples=2)
            if sample_weight is not None:
                sample_weight = _LGBMCheckSampleWeight(sample_weight, _X)
        else:
            _X, _y = X, y

        if self._class_weight is None:
            self._class_weight = self.class_weight
        if self._class_weight is not None:
            class_sample_weight = _LGBMComputeSampleWeight(self._class_weight, y)
            if sample_weight is None or len(sample_weight) == 0:
                sample_weight = class_sample_weight
            else:
                sample_weight = np.multiply(sample_weight, class_sample_weight)

        self._n_features = _X.shape[1]
        # copy for consistency
        self._n_features_in = self._n_features

        def _construct_dataset(X, y, sample_weight, init_score, group, params,
                               categorical_feature='auto'):
            return Dataset(X, label=y, weight=sample_weight, group=group,
                           init_score=init_score, params=params,
                           categorical_feature=categorical_feature)

        train_set = _construct_dataset(_X, _y, sample_weight, init_score, group, params,
                                       categorical_feature=categorical_feature)

        valid_sets = []
        if eval_set is not None:

            def _get_meta_data(collection, name, i):
                if collection is None:
                    return None
                elif isinstance(collection, list):
                    return collection[i] if len(collection) > i else None
                elif isinstance(collection, dict):
                    return collection.get(i, None)
                else:
                    raise TypeError(f"{name} should be dict or list")

            if isinstance(eval_set, tuple):
                eval_set = [eval_set]
            for i, valid_data in enumerate(eval_set):
                # reuse the training Dataset when it also appears in eval_set to reduce cost
                if valid_data[0] is X and valid_data[1] is y:
                    valid_set = train_set
                else:
                    valid_weight = _get_meta_data(eval_sample_weight, 'eval_sample_weight', i)
                    valid_class_weight = _get_meta_data(eval_class_weight, 'eval_class_weight', i)
                    if valid_class_weight is not None:
                        if isinstance(valid_class_weight, dict) and self._class_map is not None:
                            valid_class_weight = {self._class_map[k]: v for k, v in valid_class_weight.items()}
                        valid_class_sample_weight = _LGBMComputeSampleWeight(valid_class_weight, valid_data[1])
                        if valid_weight is None or len(valid_weight) == 0:
                            valid_weight = valid_class_sample_weight
                        else:
                            valid_weight = np.multiply(valid_weight, valid_class_sample_weight)
                    valid_init_score = _get_meta_data(eval_init_score, 'eval_init_score', i)
                    valid_group = _get_meta_data(eval_group, 'eval_group', i)
                    valid_set = _construct_dataset(valid_data[0], valid_data[1],
                                                   valid_weight, valid_init_score, valid_group, params)
                valid_sets.append(valid_set)

        if isinstance(init_model, LGBMModel):
            init_model = init_model.booster_

        if early_stopping_rounds is not None and early_stopping_rounds > 0:
            _log_warning("'early_stopping_rounds' argument is deprecated and will be removed in a future release of LightGBM. "
                         "Pass 'early_stopping()' callback via 'callbacks' argument instead.")
            params['early_stopping_rounds'] = early_stopping_rounds

        if callbacks is None:
            callbacks = []
        else:
            callbacks = copy.copy(callbacks)  # don't use deepcopy here to allow non-serializable objects

        if verbose != 'warn':
            _log_warning("'verbose' argument is deprecated and will be removed in a future release of LightGBM. "
                         "Pass 'log_evaluation()' callback via 'callbacks' argument instead.")
        else:
            if callbacks:  # assume user has already specified log_evaluation callback
                verbose = False
            else:
                verbose = True
        callbacks.append(log_evaluation(int(verbose)))

        evals_result = {}
        callbacks.append(record_evaluation(evals_result))

        self._Booster = train(
            params=params,
            train_set=train_set,
            num_boost_round=self.n_estimators,
            valid_sets=valid_sets,
            valid_names=eval_names,
            fobj=self._fobj,
            feval=eval_metrics_callable,
            init_model=init_model,
            feature_name=feature_name,
            callbacks=callbacks
        )

        if evals_result:
            self._evals_result = evals_result
        else:  # reset after previous call to fit()
            self._evals_result = None

        if self._Booster.best_iteration != 0:
            self._best_iteration = self._Booster.best_iteration
        else:  # reset after previous call to fit()
            self._best_iteration = None

        self._best_score = self._Booster.best_score

        self.fitted_ = True

        # free dataset
        self._Booster.free_dataset()
        del train_set, valid_sets
        return self

    fit.__doc__ = _lgbmmodel_doc_fit.format(
        X_shape="array-like or sparse matrix of shape = [n_samples, n_features]",
        y_shape="array-like of shape = [n_samples]",
        sample_weight_shape="array-like of shape = [n_samples] or None, optional (default=None)",
        init_score_shape="array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task) or shape = [n_samples, n_classes] (for multi-class task) or None, optional (default=None)",
        group_shape="array-like or None, optional (default=None)",
        eval_sample_weight_shape="list of array, or None, optional (default=None)",
        eval_init_score_shape="list of array, or None, optional (default=None)",
        eval_group_shape="list of array, or None, optional (default=None)"
    ) + "\n\n" + _lgbmmodel_doc_custom_eval_note

    def predict(self, X, raw_score=False, start_iteration=0, num_iteration=None,
                pred_leaf=False, pred_contrib=False, **kwargs):
        """Docstring is set after definition, using a template."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError("Estimator not fitted, call fit before using the model.")
        if not isinstance(X, (pd_DataFrame, dt_DataTable)):
            X = _LGBMCheckArray(X, accept_sparse=True, force_all_finite=False)
        n_features = X.shape[1]
        if self._n_features != n_features:
            raise ValueError("Number of features of the model must "
                             f"match the input. Model n_features_ is {self._n_features} and "
                             f"input n_features is {n_features}")
        return self._Booster.predict(X, raw_score=raw_score, start_iteration=start_iteration, num_iteration=num_iteration,
                                     pred_leaf=pred_leaf, pred_contrib=pred_contrib, **kwargs)

    predict.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted value for each sample.",
        X_shape="array-like or sparse matrix of shape = [n_samples, n_features]",
        output_name="predicted_result",
        predicted_result_shape="array-like of shape = [n_samples] or shape = [n_samples, n_classes]",
        X_leaves_shape="array-like of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]",
        X_SHAP_values_shape="array-like of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes] or list with n_classes length of such objects"
    )

    @property
    def n_features_(self):
        """:obj:`int`: The number of features of fitted model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No n_features found. Need to call fit beforehand.')
        return self._n_features

    @property
    def n_features_in_(self):
        """:obj:`int`: The number of features of fitted model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No n_features_in found. Need to call fit beforehand.')
        return self._n_features_in

    @property
    def best_score_(self):
        """:obj:`dict`: The best score of fitted model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No best_score found. Need to call fit beforehand.')
        return self._best_score

    @property
    def best_iteration_(self):
        """:obj:`int` or :obj:`None`: The best iteration of fitted model if ``early_stopping()`` callback has been specified."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No best_iteration found. Need to call fit with early_stopping callback beforehand.')
        return self._best_iteration

    @property
    def objective_(self):
        """:obj:`str` or :obj:`callable`: The concrete objective used while fitting this model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No objective found. Need to call fit beforehand.')
        return self._objective

    @property
    def n_estimators_(self) -> int:
        """:obj:`int`: True number of boosting iterations performed.

        This might be less than parameter ``n_estimators`` if early stopping was enabled or
        if boosting stopped early due to limits on complexity like ``min_gain_to_split``.
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No n_estimators found. Need to call fit beforehand.')
        return self._Booster.current_iteration()

    @property
    def n_iter_(self) -> int:
        """:obj:`int`: True number of boosting iterations performed.

        This might be less than parameter ``n_estimators`` if early stopping was enabled or
        if boosting stopped early due to limits on complexity like ``min_gain_to_split``.
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No n_iter found. Need to call fit beforehand.')
        return self._Booster.current_iteration()

    @property
    def booster_(self):
        """Booster: The underlying Booster of this model."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No booster found. Need to call fit beforehand.')
        return self._Booster

    @property
    def evals_result_(self):
        """:obj:`dict` or :obj:`None`: The evaluation results if validation sets have been specified."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No results found. Need to call fit with eval_set beforehand.')
        return self._evals_result

    @property
    def feature_importances_(self):
        """:obj:`array` of shape = [n_features]: The feature importances (the higher, the more important).

        .. note::

            ``importance_type`` attribute is passed to the function
            to configure the type of importance values to be extracted.
        """
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No feature_importances found. Need to call fit beforehand.')
        return self._Booster.feature_importance(importance_type=self.importance_type)

    @property
    def feature_name_(self):
        """:obj:`array` of shape = [n_features]: The names of features."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No feature_name found. Need to call fit beforehand.')
        return self._Booster.feature_name()
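

# Usage sketch (illustrative, not part of the module): fitting with a
# validation set and passing early_stopping()/log_evaluation() callbacks,
# the recommended replacements for the deprecated ``early_stopping_rounds``
# and ``verbose`` arguments handled in ``fit`` above.
def _example_fit_with_callbacks():
    """Example only: train an LGBMRegressor with callback-based early stopping."""
    from .callback import early_stopping
    X = np.random.rand(100, 4)
    y = np.random.rand(100)
    model = LGBMRegressor(n_estimators=50)
    model.fit(X[:80], y[:80],
              eval_set=[(X[80:], y[80:])],
              callbacks=[early_stopping(stopping_rounds=5), log_evaluation(period=10)])
    return model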


class LGBMRegressor(_LGBMRegressorBase, LGBMModel):
    """LightGBM regressor."""

    def fit(self, X, y,
            sample_weight=None, init_score=None,
            eval_set=None, eval_names=None, eval_sample_weight=None,
            eval_init_score=None, eval_metric=None, early_stopping_rounds=None,
            verbose='warn', feature_name='auto', categorical_feature='auto',
            callbacks=None, init_model=None):
        """Docstring is inherited from the LGBMModel."""
        super().fit(X, y, sample_weight=sample_weight, init_score=init_score,
                    eval_set=eval_set, eval_names=eval_names, eval_sample_weight=eval_sample_weight,
                    eval_init_score=eval_init_score, eval_metric=eval_metric,
                    early_stopping_rounds=early_stopping_rounds, verbose=verbose, feature_name=feature_name,
                    categorical_feature=categorical_feature, callbacks=callbacks, init_model=init_model)
        return self

    _base_doc = LGBMModel.fit.__doc__.replace("self : LGBMModel", "self : LGBMRegressor")
    _base_doc = (_base_doc[:_base_doc.find('group :')]  # type: ignore
                 + _base_doc[_base_doc.find('eval_set :'):])  # type: ignore
    _base_doc = (_base_doc[:_base_doc.find('eval_class_weight :')]
                 + _base_doc[_base_doc.find('eval_init_score :'):])
    fit.__doc__ = (_base_doc[:_base_doc.find('eval_group :')]
                   + _base_doc[_base_doc.find('eval_metric :'):])
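

# Usage sketch (illustrative, not part of the module): requesting an extra
# built-in metric via ``eval_metric``; per the ``fit`` docs it is evaluated
# in addition to the default metric of the objective ('l2' here).
def _example_regressor_eval_metric():
    """Example only: evaluate 'l1' alongside the default 'l2' on a validation set."""
    X = np.random.rand(100, 4)
    y = np.random.rand(100)
    reg = LGBMRegressor(n_estimators=20)
    reg.fit(X[:80], y[:80], eval_set=[(X[80:], y[80:])], eval_metric='l1')
    return reg.evals_result_  # contains both 'l1' and 'l2' under 'valid_0'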


class LGBMClassifier(_LGBMClassifierBase, LGBMModel):
    """LightGBM classifier."""

    def fit(self, X, y,
            sample_weight=None, init_score=None,
            eval_set=None, eval_names=None, eval_sample_weight=None,
            eval_class_weight=None, eval_init_score=None, eval_metric=None,
            early_stopping_rounds=None, verbose='warn',
            feature_name='auto', categorical_feature='auto',
            callbacks=None, init_model=None):
        """Docstring is inherited from the LGBMModel."""
        _LGBMAssertAllFinite(y)
        _LGBMCheckClassificationTargets(y)
        self._le = _LGBMLabelEncoder().fit(y)
        _y = self._le.transform(y)
        self._class_map = dict(zip(self._le.classes_, self._le.transform(self._le.classes_)))
        if isinstance(self.class_weight, dict):
            self._class_weight = {self._class_map[k]: v for k, v in self.class_weight.items()}

        self._classes = self._le.classes_
        self._n_classes = len(self._classes)

        if not callable(eval_metric):
            if isinstance(eval_metric, (str, type(None))):
                eval_metric = [eval_metric]
            if self._n_classes > 2:
                for index, metric in enumerate(eval_metric):
                    if metric in {'logloss', 'binary_logloss'}:
                        eval_metric[index] = "multi_logloss"
                    elif metric in {'error', 'binary_error'}:
                        eval_metric[index] = "multi_error"
            else:
                for index, metric in enumerate(eval_metric):
                    if metric in {'logloss', 'multi_logloss'}:
                        eval_metric[index] = 'binary_logloss'
                    elif metric in {'error', 'multi_error'}:
                        eval_metric[index] = 'binary_error'

        # do not modify args, as it causes errors in model selection tools
        valid_sets = None
        if eval_set is not None:
            if isinstance(eval_set, tuple):
                eval_set = [eval_set]
            valid_sets = [None] * len(eval_set)
            for i, (valid_x, valid_y) in enumerate(eval_set):
                if valid_x is X and valid_y is y:
                    valid_sets[i] = (valid_x, _y)
                else:
                    valid_sets[i] = (valid_x, self._le.transform(valid_y))

        super().fit(X, _y, sample_weight=sample_weight, init_score=init_score, eval_set=valid_sets,
                    eval_names=eval_names, eval_sample_weight=eval_sample_weight,
                    eval_class_weight=eval_class_weight, eval_init_score=eval_init_score,
                    eval_metric=eval_metric, early_stopping_rounds=early_stopping_rounds,
                    verbose=verbose, feature_name=feature_name, categorical_feature=categorical_feature,
                    callbacks=callbacks, init_model=init_model)
        return self

    _base_doc = LGBMModel.fit.__doc__.replace("self : LGBMModel", "self : LGBMClassifier")
    _base_doc = (_base_doc[:_base_doc.find('group :')]  # type: ignore
                 + _base_doc[_base_doc.find('eval_set :'):])  # type: ignore
    fit.__doc__ = (_base_doc[:_base_doc.find('eval_group :')]
                   + _base_doc[_base_doc.find('eval_metric :'):])

    def predict(self, X, raw_score=False, start_iteration=0, num_iteration=None,
                pred_leaf=False, pred_contrib=False, **kwargs):
        """Docstring is inherited from the LGBMModel."""
        result = self.predict_proba(X, raw_score, start_iteration, num_iteration,
                                    pred_leaf, pred_contrib, **kwargs)
        if callable(self._objective) or raw_score or pred_leaf or pred_contrib:
            return result
        else:
            class_index = np.argmax(result, axis=1)
            return self._le.inverse_transform(class_index)

    predict.__doc__ = LGBMModel.predict.__doc__

    def predict_proba(self, X, raw_score=False, start_iteration=0, num_iteration=None,
                      pred_leaf=False, pred_contrib=False, **kwargs):
        """Docstring is set after definition, using a template."""
        result = super().predict(X, raw_score, start_iteration, num_iteration, pred_leaf, pred_contrib, **kwargs)
        if callable(self._objective) and not (raw_score or pred_leaf or pred_contrib):
            _log_warning("Cannot compute class probabilities or labels "
                         "due to the usage of customized objective function.\n"
                         "Returning raw scores instead.")
            return result
        elif self._n_classes > 2 or raw_score or pred_leaf or pred_contrib:
            return result
        else:
            return np.vstack((1. - result, result)).transpose()

    predict_proba.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted probability for each class for each sample.",
        X_shape="array-like or sparse matrix of shape = [n_samples, n_features]",
        output_name="predicted_probability",
        predicted_result_shape="array-like of shape = [n_samples] or shape = [n_samples, n_classes]",
        X_leaves_shape="array-like of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]",
        X_SHAP_values_shape="array-like of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes] or list with n_classes length of such objects"
    )

    @property
    def classes_(self):
        """:obj:`array` of shape = [n_classes]: The class label array."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No classes found. Need to call fit beforehand.')
        return self._classes

    @property
    def n_classes_(self):
        """:obj:`int`: The number of classes."""
        if not self.__sklearn_is_fitted__():
            raise LGBMNotFittedError('No classes found. Need to call fit beforehand.')
        return self._n_classes
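

# Usage sketch (illustrative, not part of the module): for a binary classifier
# ``predict_proba`` stacks (1 - p, p) as shown above, so its columns line up
# with ``classes_``.
def _example_binary_predict_proba():
    """Example only: column i of predict_proba corresponds to classes_[i]."""
    X = np.random.rand(100, 4)
    y = np.arange(100) % 2
    clf = LGBMClassifier(n_estimators=20).fit(X, y)
    proba = clf.predict_proba(X)  # shape = [n_samples, 2]
    return dict(zip(clf.classes_, proba.mean(axis=0)))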


class LGBMRanker(LGBMModel):
    """LightGBM ranker.

    .. warning::

        scikit-learn doesn't support ranking applications yet,
        therefore this class is not really compatible with the sklearn ecosystem.
        Please use this class mainly for training and applying ranking models in the usual scikit-learn-like way.
    """

    def fit(self, X, y,
            sample_weight=None, init_score=None, group=None,
            eval_set=None, eval_names=None, eval_sample_weight=None,
            eval_init_score=None, eval_group=None, eval_metric=None,
            eval_at=(1, 2, 3, 4, 5), early_stopping_rounds=None, verbose='warn',
            feature_name='auto', categorical_feature='auto',
            callbacks=None, init_model=None):
        """Docstring is inherited from the LGBMModel."""
        # check group data
        if group is None:
            raise ValueError("Should set group for ranking task")

        if eval_set is not None:
            if eval_group is None:
                raise ValueError("Eval_group cannot be None when eval_set is not None")
            elif len(eval_group) != len(eval_set):
                raise ValueError("Length of eval_group should be equal to eval_set")
            elif (isinstance(eval_group, dict)
                  and any(i not in eval_group or eval_group[i] is None for i in range(len(eval_group)))
                  or isinstance(eval_group, list)
                  and any(group is None for group in eval_group)):
                raise ValueError("Should set group for all eval datasets for ranking task; "
                                 "if you use dict, the index should start from 0")

        self._eval_at = eval_at
        super().fit(X, y, sample_weight=sample_weight, init_score=init_score, group=group,
                    eval_set=eval_set, eval_names=eval_names, eval_sample_weight=eval_sample_weight,
                    eval_init_score=eval_init_score, eval_group=eval_group, eval_metric=eval_metric,
                    early_stopping_rounds=early_stopping_rounds, verbose=verbose, feature_name=feature_name,
                    categorical_feature=categorical_feature, callbacks=callbacks, init_model=init_model)
        return self

    _base_doc = LGBMModel.fit.__doc__.replace("self : LGBMModel", "self : LGBMRanker")
    fit.__doc__ = (_base_doc[:_base_doc.find('eval_class_weight :')]  # type: ignore
                   + _base_doc[_base_doc.find('eval_init_score :'):])  # type: ignore
    _base_doc = fit.__doc__
    _before_early_stop, _early_stop, _after_early_stop = _base_doc.partition('early_stopping_rounds :')
    fit.__doc__ = f"""{_before_early_stop}eval_at : iterable of int, optional (default=(1, 2, 3, 4, 5))
        The evaluation positions of the specified metric.
    {_early_stop}{_after_early_stop}"""
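

# Usage sketch (illustrative, not part of the module): the ranker requires
# ``group`` sizes summing to n_samples; here 100 documents form 5 queries of
# 20 documents each.
def _example_ranker_fit():
    """Example only: fit LGBMRanker with query group sizes."""
    X = np.random.rand(100, 4)
    y = np.random.randint(4, size=100)  # graded relevance labels
    group = [20] * 5                    # sum(group) == n_samples
    ranker = LGBMRanker(n_estimators=20)
    ranker.fit(X, y, group=group)
    return ranker.predict(X[:20])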