# coding: utf-8
# pylint: disable = invalid-name, W0105, C0111, C0301
"""Scikit-Learn Wrapper interface for LightGBM."""
from __future__ import absolute_import

import inspect

import numpy as np

from .basic import IS_PY3, Dataset, LightGBMError
from .engine import train

'''sklearn'''
try:
    from sklearn.base import BaseEstimator
    from sklearn.base import RegressorMixin, ClassifierMixin
    from sklearn.preprocessing import LabelEncoder
    from sklearn.utils import deprecated
    SKLEARN_INSTALLED = True
    LGBMModelBase = BaseEstimator
    LGBMRegressorBase = RegressorMixin
    LGBMClassifierBase = ClassifierMixin
    LGBMLabelEncoder = LabelEncoder
except ImportError:
    SKLEARN_INSTALLED = False
    LGBMModelBase = object
    LGBMClassifierBase = object
    LGBMRegressorBase = object
    LGBMLabelEncoder = None


def _argc(func):
    if IS_PY3:
        return len(inspect.signature(func).parameters)
    else:
        return len(inspect.getargspec(func).args)


def _objective_function_wrapper(func):
    """Decorate an objective function
    Note: for multi-class task, the y_pred is group by class_id first, then group by row_id
          if you want to get i-th row y_pred in j-th class, the access way is y_pred[j*num_data+i]
          and you should group grad and hess in this way as well
    Parameters
    ----------
    func: callable
47
48
49
        Expects a callable with signature ``func(y_true, y_pred)`` or ``func(y_true, y_pred, group):
            y_true: array_like of shape [n_samples]
                The target values
50
            y_pred: array_like of shape [n_samples] or shape[n_samples * n_class] (for multi-class)
51
52
53
                The predicted values
            group: array_like
                group/query data, used for ranking task

    Returns
    -------
    new_func: callable
        The new objective function as expected by ``lightgbm.engine.train``.
        The signature is ``new_func(preds, dataset)``:

        preds: array_like, shape [n_samples] or shape [n_samples * n_class]
            The predicted values
        dataset: ``dataset``
            The training set from which the labels will be extracted using
            ``dataset.get_label()``
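
    Example
    -------
    An illustrative custom objective under the two-argument signature
    (``mse_objective`` is a hypothetical name, not part of this module)::

        def mse_objective(y_true, y_pred):
            grad = y_pred - y_true       # first derivative of 0.5 * (y_pred - y_true) ** 2
            hess = np.ones_like(y_pred)  # second derivative
            return grad, hess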
    """
    def inner(preds, dataset):
        """internal function"""
        labels = dataset.get_label()
        argc = _argc(func)
        if argc == 2:
            grad, hess = func(labels, preds)
        elif argc == 3:
            grad, hess = func(labels, preds, dataset.get_group())
        else:
            raise TypeError("Self-defined objective function should have 2 or 3 arguments, got %d" % argc)
        """weighted for objective"""
        weight = dataset.get_weight()
        if weight is not None:
            """only one class"""
            if len(weight) == len(grad):
                grad = np.multiply(grad, weight)
                hess = np.multiply(hess, weight)
            else:
                num_data = len(weight)
                num_class = len(grad) // num_data
                if num_class * num_data != len(grad):
                    raise ValueError("Length of grad and hess should be equal to num_class * num_data")
                for k in range(num_class):
                    for i in range(num_data):
                        idx = k * num_data + i
                        grad[idx] *= weight[i]
                        hess[idx] *= weight[i]
        return grad, hess
    return inner


def _eval_function_wrapper(func):
    """Decorate an eval function
    Note: for multi-class task, the y_pred is group by class_id first, then group by row_id
          if you want to get i-th row y_pred in j-th class, the access way is y_pred[j*num_data+i]
    Parameters
    ----------
    func: callable
105
106
107
108
109
        Expects a callable with following functions:
            ``func(y_true, y_pred)``,
            ``func(y_true, y_pred, weight)``
         or ``func(y_true, y_pred, weight, group)``
            and return (eval_name->str, eval_result->float, is_bigger_better->Bool):
110
111
112

            y_true: array_like of shape [n_samples]
                The target values
113
            y_pred: array_like of shape [n_samples] or shape[n_samples * n_class] (for multi-class)
                The predicted values
            weight: array_like of shape [n_samples]
                The weight of samples
            group: array_like
                group/query data, used for ranking task

    Returns
    -------
    new_func: callable
        The new eval function as expected by ``lightgbm.engine.train``.
        The signature is ``new_func(preds, dataset)``:

        preds: array_like, shape [n_samples] or shape [n_samples * n_class]
            The predicted values
        dataset: ``dataset``
            The training set from which the labels will be extracted using
            ``dataset.get_label()``
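
    Example
    -------
    An illustrative custom eval function under the two-argument signature
    (``rmse`` is a hypothetical name, not part of this module)::

        def rmse(y_true, y_pred):
            return 'rmse', np.sqrt(np.mean((y_pred - y_true) ** 2)), False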
    """
    def inner(preds, dataset):
        """internal function"""
        labels = dataset.get_label()
        argc = _argc(func)
        if argc == 2:
            return func(labels, preds)
        elif argc == 3:
            return func(labels, preds, dataset.get_weight())
        elif argc == 4:
            return func(labels, preds, dataset.get_weight(), dataset.get_group())
        else:
            raise TypeError("Self-defined eval function should have 2, 3 or 4 arguments, got %d" % argc)
    return inner


class LGBMModel(LGBMModelBase):

    def __init__(self, boosting_type="gbdt", num_leaves=31, max_depth=-1,
                 learning_rate=0.1, n_estimators=10, max_bin=255,
                 subsample_for_bin=50000, objective="regression",
                 min_split_gain=0, min_child_weight=5, min_child_samples=10,
                 subsample=1, subsample_freq=1, colsample_bytree=1,
                 reg_alpha=0, reg_lambda=0, scale_pos_weight=1,
                 is_unbalance=False, seed=0, nthread=-1, silent=True,
                 sigmoid=1.0, max_position=20, label_gain=None,
                 drop_rate=0.1, skip_drop=0.5, max_drop=50,
                 uniform_drop=False, xgboost_dart_mode=False):
        """
        Implementation of the Scikit-Learn API for LightGBM.

        Parameters
        ----------
        boosting_type : string
            gbdt, traditional Gradient Boosting Decision Tree
            dart, Dropouts meet Multiple Additive Regression Trees
        num_leaves : int
            Maximum tree leaves for base learners.
        max_depth : int
            Maximum tree depth for base learners, -1 means no limit.
        learning_rate : float
            Boosting learning rate
        n_estimators : int
            Number of boosted trees to fit.
        max_bin : int
            Number of bucketed bins for feature values
        subsample_for_bin : int
            Number of samples for constructing bins.
        objective : string or callable
            Specify the learning task and the corresponding learning objective or
            a custom objective function to be used (see note below).
            default: binary for LGBMClassifier, lambdarank for LGBMRanker
        min_split_gain : float
            Minimum loss reduction required to make a further partition on a leaf node of the tree.
        min_child_weight : int
            Minimum sum of instance weight (hessian) needed in a child (leaf).
        min_child_samples : int
            Minimum number of data needed in a child (leaf).
        subsample : float
            Subsample ratio of the training instance.
        subsample_freq : int
            Frequency of subsample; <=0 means no subsampling.
        colsample_bytree : float
            Subsample ratio of columns when constructing each tree.
        reg_alpha : float
            L1 regularization term on weights
        reg_lambda : float
            L2 regularization term on weights
        scale_pos_weight : float
            Balancing of positive and negative weights.
        is_unbalance : bool
            Whether the training data are unbalanced (binary classification only).
        seed : int
            Random number seed.
        nthread : int
            Number of parallel threads
        silent : boolean
            Whether to print messages while running boosting.
        sigmoid : float
            Only used in binary classification and lambdarank. Parameter for sigmoid function.
        max_position : int
            Only used in lambdarank, will optimize NDCG at this position.
        label_gain : list of float
            Only used in lambdarank, relevant gain for labels.
            For example, the gain of label 2 is 3 if using default label gains.
            None (default) means use default value of CLI version: {0,1,3,7,15,31,63,...}.
        drop_rate : float
            Only used when boosting_type='dart'. Probability to select dropping trees.
        skip_drop : float
            Only used when boosting_type='dart'. Probability to skip dropping trees.
        max_drop : int
            Only used when boosting_type='dart'. Max number of dropped trees in one iteration.
        uniform_drop : bool
            Only used when boosting_type='dart'. If true, drop trees uniformly, else drop according to weights.
        xgboost_dart_mode : bool
            Only used when boosting_type='dart'. Whether to use xgboost dart mode.

        Note
        ----
        A custom objective function can be provided for the ``objective``
        parameter. In this case, it should have the signature
        ``objective(y_true, y_pred) -> grad, hess``
            or ``objective(y_true, y_pred, group) -> grad, hess``:

            y_true: array_like of shape [n_samples]
                The target values
            y_pred: array_like of shape [n_samples] or shape [n_samples * n_class]
                The predicted values
            group: array_like
                group/query data, used for ranking task
            grad: array_like of shape [n_samples] or shape [n_samples * n_class]
                The value of the gradient for each sample point.
            hess: array_like of shape [n_samples] or shape [n_samples * n_class]
                The value of the second derivative for each sample point

        for multi-class tasks, y_pred is grouped by class_id first, then by row_id;
            to get the i-th row's prediction for the j-th class, use y_pred[j * num_data + i],
            and grad and hess should be grouped in the same way.
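
        For example, with num_class classes and num_data rows,
        y_pred[j * num_data + i] corresponds to the illustrative row-major view
        ``y_pred.reshape(num_class, num_data)[j, i]``.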
        """
        if not SKLEARN_INSTALLED:
            raise LightGBMError('Scikit-learn is required for this module')

        self.boosting_type = boosting_type
        self.num_leaves = num_leaves
        self.max_depth = max_depth
        self.learning_rate = learning_rate
        self.n_estimators = n_estimators
        self.max_bin = max_bin
        self.subsample_for_bin = subsample_for_bin
        self.objective = objective
        self.min_split_gain = min_split_gain
        self.min_child_weight = min_child_weight
        self.min_child_samples = min_child_samples
        self.subsample = subsample
        self.subsample_freq = subsample_freq
        self.colsample_bytree = colsample_bytree
        self.reg_alpha = reg_alpha
        self.reg_lambda = reg_lambda
        self.scale_pos_weight = scale_pos_weight
        self.is_unbalance = is_unbalance
        self.seed = seed
        self.nthread = nthread
        self.silent = silent
        self.sigmoid = sigmoid
        self.max_position = max_position
        self.label_gain = label_gain
        self.drop_rate = drop_rate
        self.skip_drop = skip_drop
        self.max_drop = max_drop
        self.uniform_drop = uniform_drop
        self.xgboost_dart_mode = xgboost_dart_mode
        self._Booster = None
        self.evals_result = None
        self.best_iteration = -1
        if callable(self.objective):
            self.fobj = _objective_function_wrapper(self.objective)
        else:
            self.fobj = None

    def fit(self, X, y,
            sample_weight=None, init_score=None, group=None,
            eval_set=None, eval_sample_weight=None,
            eval_init_score=None, eval_group=None,
            eval_metric=None,
            early_stopping_rounds=None, verbose=True,
            feature_name=None, categorical_feature=None,
            callbacks=None):
        """
        Fit the gradient boosting model

        Parameters
        ----------
        X : array_like
            Feature matrix
        y : array_like
            Labels
        sample_weight : array_like
            weight of training data
        init_score : array_like
            init score of training data
        group : array_like
            group data of training data
        eval_set : list, optional
            A list of (X, y) tuple pairs to use as a validation set for early-stopping
        eval_sample_weight : List of array
            weight of eval data
        eval_init_score : List of array
            init score of eval data
        eval_group : List of array
            group data of eval data
        eval_metric : str, list of str, callable, optional
            If a str, should be a built-in evaluation metric to use.
            If callable, a custom evaluation metric, see note for more details.
        early_stopping_rounds : int
            Activates early stopping. Validation error needs to decrease at
            least every `early_stopping_rounds` round(s) to continue training.
        verbose : bool
            If `verbose` and an evaluation set is used, writes the evaluation
            progress (the evaluation metric on the evaluation set) at each
            boosting stage.
        feature_name : list of str
            Feature names
        categorical_feature : list of str or int
            Categorical features,
            type int represents index,
            type str represents feature names (need to specify feature_name as well)
        callbacks : list of callback functions
            List of callback functions that are applied at each iteration.
            See Callbacks in Python-API.md for more information.

        Note
        ----
        Custom eval function expects a callable with one of the following signatures:
            ``func(y_true, y_pred)``, ``func(y_true, y_pred, weight)``
                or ``func(y_true, y_pred, weight, group)``
            and returns (eval_name, eval_result, is_bigger_better)
                or a list of (eval_name, eval_result, is_bigger_better):

            y_true: array_like of shape [n_samples]
                The target values
            y_pred: array_like of shape [n_samples] or shape [n_samples * n_class] (for multi-class)
                The predicted values
            weight: array_like of shape [n_samples]
                The weight of samples
            group: array_like
                group/query data, used for ranking task
            eval_name: str
                name of evaluation
            eval_result: float
                eval result
            is_bigger_better: bool
                whether a bigger eval result is better, e.g. AUC is bigger_better.
        for multi-class tasks, y_pred is grouped by class_id first, then by row_id;
          to get the i-th row's prediction for the j-th class, use y_pred[j * num_data + i]
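
        Example
        -------
        Illustrative usage with a held-out validation set (the variable names
        are hypothetical)::

            model = LGBMModel(n_estimators=100)
            model.fit(X_train, y_train,
                      eval_set=[(X_valid, y_valid)],
                      eval_metric='l2',
                      early_stopping_rounds=5)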
        """
        evals_result = {}
        params = self.get_params()
        params['verbose'] = -1 if self.silent else 1
        if hasattr(self, 'n_classes_') and self.n_classes_ > 2:
            params['num_class'] = self.n_classes_
        if hasattr(self, 'eval_at'):
            params['ndcg_eval_at'] = self.eval_at
        if self.fobj:
            params['objective'] = 'None'  # objective = nullptr for unknown objective
        if 'label_gain' in params and params['label_gain'] is None:
            del params['label_gain']  # use default of cli version

        if callable(eval_metric):
            feval = _eval_function_wrapper(eval_metric)
        else:
            feval = None
            params['metric'] = eval_metric

        def _construct_dataset(X, y, sample_weight, init_score, group, params):
            ret = Dataset(X, label=y, max_bin=self.max_bin, weight=sample_weight, group=group, params=params)
            ret.set_init_score(init_score)
            return ret

        train_set = _construct_dataset(X, y, sample_weight, init_score, group, params)

        valid_sets = []
        if eval_set is not None:
            if isinstance(eval_set, tuple):
                eval_set = [eval_set]
            for i, valid_data in enumerate(eval_set):
                """reduce cost for prediction training data"""
                if valid_data[0] is X and valid_data[1] is y:
                    valid_set = train_set
                else:
                    def get_meta_data(collection, i):
                        if collection is None:
                            return None
                        elif isinstance(collection, list):
                            return collection[i] if len(collection) > i else None
                        elif isinstance(collection, dict):
                            return collection.get(i, None)
                        else:
                            raise TypeError('eval_sample_weight, eval_init_score, and eval_group should be dict or list')
                    valid_weight = get_meta_data(eval_sample_weight, i)
                    valid_init_score = get_meta_data(eval_init_score, i)
                    valid_group = get_meta_data(eval_group, i)
                    valid_set = _construct_dataset(valid_data[0], valid_data[1], valid_weight, valid_init_score, valid_group, params)
                valid_sets.append(valid_set)

        self._Booster = train(params, train_set,
                              self.n_estimators, valid_sets=valid_sets,
                              early_stopping_rounds=early_stopping_rounds,
                              evals_result=evals_result, fobj=self.fobj, feval=feval,
                              verbose_eval=verbose, feature_name=feature_name,
                              categorical_feature=categorical_feature,
                              callbacks=callbacks)

        if evals_result:
            self.evals_result = evals_result

        if early_stopping_rounds is not None:
            self.best_iteration = self._Booster.best_iteration
        return self

    def predict(self, X, raw_score=False, num_iteration=0):
        """
        Return the predicted value for each sample.

        Parameters
        ----------
        X : array_like, shape=[n_samples, n_features]
            Input features matrix.

        raw_score : bool
            Whether to predict raw scores; defaults to False.

        num_iteration : int
            Limit number of iterations in the prediction; defaults to 0 (use all trees).

        Returns
        -------
        predicted_result : array_like, shape=[n_samples] or [n_samples, n_classes]
        """
        return self.booster_.predict(X, raw_score=raw_score, num_iteration=num_iteration)

    def apply(self, X, num_iteration=0):
        """
        Return the predicted leaf of every tree for each sample.

        Parameters
        ----------
        X : array_like, shape=[n_samples, n_features]
            Input features matrix.

        num_iteration : int
            Limit number of iterations in the prediction; defaults to 0 (use all trees).

        Returns
        -------
        X_leaves : array_like, shape=[n_samples, n_trees]
        """
        return self.booster_.predict(X, pred_leaf=True, num_iteration=num_iteration)

    @property
    def booster_(self):
        """Get the underlying lightgbm Booster of this model."""
        if self._Booster is None:
            raise LightGBMError('No booster found. Need to call fit beforehand.')
        return self._Booster

    @property
    def evals_result_(self):
        """Get the evaluation results."""
        if self.evals_result is None:
            raise LightGBMError('No results found. Need to call fit with eval set beforehand.')
        return self.evals_result

    @property
    def feature_importance_(self):
        """Get normailized feature importances."""
        importace_array = self.booster_.feature_importance().astype(np.float32)
        return importace_array / importace_array.sum()

    @deprecated('Use attribute booster_ instead.')
    def booster(self):
        return self.booster_

    @deprecated('Use attribute feature_importance_ instead.')
    def feature_importance(self):
        return self.feature_importance_


class LGBMRegressor(LGBMModel, LGBMRegressorBase):

    def fit(self, X, y,
            sample_weight=None, init_score=None,
            eval_set=None, eval_sample_weight=None,
            eval_init_score=None,
            eval_metric="l2",
            early_stopping_rounds=None, verbose=True,
            feature_name=None, categorical_feature=None, callbacks=None):

        super(LGBMRegressor, self).fit(X, y, sample_weight=sample_weight,
                                       init_score=init_score, eval_set=eval_set,
                                       eval_sample_weight=eval_sample_weight,
                                       eval_init_score=eval_init_score,
                                       eval_metric=eval_metric,
                                       early_stopping_rounds=early_stopping_rounds,
                                       verbose=verbose, feature_name=feature_name,
                                       categorical_feature=categorical_feature,
                                       callbacks=callbacks)
        return self


class LGBMClassifier(LGBMModel, LGBMClassifierBase):

    def __init__(self, boosting_type="gbdt", num_leaves=31, max_depth=-1,
                 learning_rate=0.1, n_estimators=10, max_bin=255,
                 subsample_for_bin=50000, objective="binary",
                 min_split_gain=0, min_child_weight=5, min_child_samples=10,
                 subsample=1, subsample_freq=1, colsample_bytree=1,
                 reg_alpha=0, reg_lambda=0, scale_pos_weight=1,
                 is_unbalance=False, seed=0, nthread=-1,
                 silent=True, sigmoid=1.0,
                 drop_rate=0.1, skip_drop=0.5, max_drop=50,
                 uniform_drop=False, xgboost_dart_mode=False):
        self.classes, self.n_classes = None, None
        super(LGBMClassifier, self).__init__(boosting_type=boosting_type, num_leaves=num_leaves,
                                             max_depth=max_depth, learning_rate=learning_rate,
                                             n_estimators=n_estimators, max_bin=max_bin,
                                             subsample_for_bin=subsample_for_bin, objective=objective,
                                             min_split_gain=min_split_gain, min_child_weight=min_child_weight,
                                             min_child_samples=min_child_samples, subsample=subsample,
                                             subsample_freq=subsample_freq, colsample_bytree=colsample_bytree,
                                             reg_alpha=reg_alpha, reg_lambda=reg_lambda,
                                             scale_pos_weight=scale_pos_weight, is_unbalance=is_unbalance,
                                             seed=seed, nthread=nthread, silent=silent, sigmoid=sigmoid,
                                             drop_rate=drop_rate, skip_drop=skip_drop, max_drop=max_drop,
                                             uniform_drop=uniform_drop, xgboost_dart_mode=xgboost_dart_mode)

    def fit(self, X, y,
            sample_weight=None, init_score=None,
            eval_set=None, eval_sample_weight=None,
            eval_init_score=None,
            eval_metric="binary_logloss",
            early_stopping_rounds=None, verbose=True,
            feature_name=None, categorical_feature=None,
            callbacks=None):
        self._le = LGBMLabelEncoder().fit(y)
        y = self._le.transform(y)

        self.classes = self._le.classes_
        self.n_classes = len(self.classes_)
        if self.n_classes > 2:
            # Switch to using a multiclass objective in the underlying LGBM instance
            self.objective = "multiclass"
            if eval_set is not None and eval_metric == "binary_logloss":
                eval_metric = "multi_logloss"

        if eval_set is not None:
            eval_set = [(x[0], self._le.transform(x[1])) for x in eval_set]

        super(LGBMClassifier, self).fit(X, y, sample_weight=sample_weight,
                                        init_score=init_score, eval_set=eval_set,
                                        eval_sample_weight=eval_sample_weight,
                                        eval_init_score=eval_init_score,
                                        eval_metric=eval_metric,
                                        early_stopping_rounds=early_stopping_rounds,
                                        verbose=verbose, feature_name=feature_name,
                                        categorical_feature=categorical_feature,
                                        callbacks=callbacks)
        return self

    def predict(self, X, raw_score=False, num_iteration=0):
        class_probs = self.predict_proba(X, raw_score, num_iteration)
        class_index = np.argmax(class_probs, axis=1)
        return self._le.inverse_transform(class_index)

    def predict_proba(self, X, raw_score=False, num_iteration=0):
        """
        Return the predicted probability for each class for each sample.

        Parameters
        ----------
        X : array_like, shape=[n_samples, n_features]
            Input features matrix.

        raw_score : bool
            Whether to predict raw scores; defaults to False.

        num_iteration : int
            Limit number of iterations in the prediction; defaults to 0 (use all trees).

        Returns
        -------
        predicted_probability : array_like, shape=[n_samples, n_classes]
        """
        class_probs = self.booster_.predict(X, raw_score=raw_score, num_iteration=num_iteration)
        if self.n_classes > 2:
            return class_probs
        else:
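            # binary task: the booster returns only the positive-class
            # probability, so stack [P(y=0), P(y=1)] column-wise to match the
            # sklearn predict_proba convention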
            return np.vstack((1. - class_probs, class_probs)).transpose()

    @property
    def classes_(self):
        """Get class label array."""
        if self.classes is None:
            raise LightGBMError('No classes found. Need to call fit beforehand.')
        return self.classes

    @property
    def n_classes_(self):
        """Get number of classes"""
        if self.n_classes is None:
            raise LightGBMError('No classes found. Need to call fit beforehand.')
        return self.n_classes


class LGBMRanker(LGBMModel):

    def __init__(self, boosting_type="gbdt", num_leaves=31, max_depth=-1,
                 learning_rate=0.1, n_estimators=10, max_bin=255,
                 subsample_for_bin=50000, objective="lambdarank",
                 min_split_gain=0, min_child_weight=5, min_child_samples=10,
                 subsample=1, subsample_freq=1, colsample_bytree=1,
                 reg_alpha=0, reg_lambda=0, scale_pos_weight=1,
                 is_unbalance=False, seed=0, nthread=-1, silent=True,
                 sigmoid=1.0, max_position=20, label_gain=None,
                 drop_rate=0.1, skip_drop=0.5, max_drop=50,
                 uniform_drop=False, xgboost_dart_mode=False):
        super(LGBMRanker, self).__init__(boosting_type=boosting_type, num_leaves=num_leaves,
                                         max_depth=max_depth, learning_rate=learning_rate,
                                         n_estimators=n_estimators, max_bin=max_bin,
                                         subsample_for_bin=subsample_for_bin, objective=objective,
                                         min_split_gain=min_split_gain, min_child_weight=min_child_weight,
                                         min_child_samples=min_child_samples, subsample=subsample,
                                         subsample_freq=subsample_freq, colsample_bytree=colsample_bytree,
                                         reg_alpha=reg_alpha, reg_lambda=reg_lambda,
                                         scale_pos_weight=scale_pos_weight, is_unbalance=is_unbalance,
                                         seed=seed, nthread=nthread, silent=silent,
                                         sigmoid=sigmoid, max_position=max_position, label_gain=label_gain,
                                         drop_rate=drop_rate, skip_drop=skip_drop, max_drop=max_drop,
                                         uniform_drop=uniform_drop, xgboost_dart_mode=xgboost_dart_mode)

    def fit(self, X, y,
            sample_weight=None, init_score=None, group=None,
            eval_set=None, eval_sample_weight=None,
            eval_init_score=None, eval_group=None,
            eval_metric='ndcg', eval_at=1,
            early_stopping_rounds=None, verbose=True,
            feature_name=None, categorical_feature=None,
            callbacks=None):
        """
        Most arguments are the same as in the other fit methods, except the following:

        eval_at : list of int
            The evaluation positions of NDCG
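
        Example (illustrative): for a dataset with two queries of 10 and 20
        rows respectively, pass ``group=[10, 20]``; the first 10 rows then form
        the first query and the following 20 rows the second.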
        """

        """check group data"""
        if group is None:
            raise ValueError("Should set group for ranking task")

        if eval_set is not None:
            if eval_group is None:
                raise ValueError("Eval_group cannot be None when eval_set is not None")
            elif len(eval_group) != len(eval_set):
                raise ValueError("Length of eval_group should be equal to length of eval_set")
            elif (isinstance(eval_group, dict) and any(i not in eval_group or eval_group[i] is None for i in range(len(eval_group)))) \
                    or (isinstance(eval_group, list) and any(group is None for group in eval_group)):
                raise ValueError("Should set group for all eval datasets for ranking task; if you use dict, the index should start from 0")
667

        if eval_at is not None:
            self.eval_at = eval_at
        super(LGBMRanker, self).fit(X, y, sample_weight=sample_weight,
                                    init_score=init_score, group=group,
                                    eval_set=eval_set, eval_sample_weight=eval_sample_weight,
                                    eval_init_score=eval_init_score, eval_group=eval_group,
                                    eval_metric=eval_metric,
                                    early_stopping_rounds=early_stopping_rounds,
                                    verbose=verbose, feature_name=feature_name,
                                    categorical_feature=categorical_feature,
                                    callbacks=callbacks)
        return self