# coding: utf-8
import copy
import itertools
import math
import os
import psutil
import random
import unittest

import lightgbm as lgb
import numpy as np
from scipy.sparse import csr_matrix, isspmatrix_csr, isspmatrix_csc
from sklearn.datasets import (load_boston, load_breast_cancer, load_digits,
                              load_iris, load_svmlight_file, make_multilabel_classification)
from sklearn.metrics import log_loss, mean_absolute_error, mean_squared_error, roc_auc_score
from sklearn.model_selection import train_test_split, TimeSeriesSplit, GroupKFold

try:
    import cPickle as pickle
except ImportError:
    import pickle


decreasing_generator = itertools.count(0, -1)


def dummy_obj(preds, train_data):
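    # Placeholder custom objective: returns a constant gradient and hessian of 1 for every sample.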
    return np.ones(preds.shape), np.ones(preds.shape)


def multi_logloss(y_true, y_pred):
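    # Reference multiclass log-loss: mean negative log-probability assigned to each sample's true class.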
    return np.mean([-math.log(y_pred[i][y]) for i, y in enumerate(y_true)])


def top_k_error(y_true, y_pred, k):
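    # Top-k error: share of samples whose true-class score is not strictly greater than the best
    # score outside the top k (defined as 0 when k equals the number of classes).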
    if k == y_pred.shape[1]:
        return 0
    max_rest = np.max(-np.partition(-y_pred, k)[:, k:], axis=1)
    return 1 - np.mean((y_pred[np.arange(len(y_true)), y_true] > max_rest))


def constant_metric(preds, train_data):
    return ('error', 0.0, False)


def decreasing_metric(preds, train_data):
    return ('decreasing_metric', next(decreasing_generator), False)


def categorize(continuous_x):
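    # Bin continuous values in [0, 1) into integer codes so they can be used as a categorical feature.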
    return np.digitize(continuous_x, bins=np.arange(0, 1, 0.01))


class TestEngine(unittest.TestCase):
    def test_binary(self):
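        # 'num_iteration' given in params should override num_boost_round=20, so 50 iterations are evaluated.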
        X, y = load_breast_cancer(True)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
        params = {
            'objective': 'binary',
            'metric': 'binary_logloss',
            'verbose': -1,
            'num_iteration': 50  # test num_iteration in dict here
        }
        lgb_train = lgb.Dataset(X_train, y_train)
        lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
        evals_result = {}
        gbm = lgb.train(params, lgb_train,
                        num_boost_round=20,
                        valid_sets=lgb_eval,
                        verbose_eval=False,
                        evals_result=evals_result)
        ret = log_loss(y_test, gbm.predict(X_test))
        self.assertLess(ret, 0.14)
        self.assertEqual(len(evals_result['valid_0']['binary_logloss']), 50)
        self.assertAlmostEqual(evals_result['valid_0']['binary_logloss'][-1], ret, places=5)

    def test_rf(self):
        X, y = load_breast_cancer(True)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
        params = {
            'boosting_type': 'rf',
            'objective': 'binary',
            'bagging_freq': 1,
            'bagging_fraction': 0.5,
            'feature_fraction': 0.5,
            'num_leaves': 50,
            'metric': 'binary_logloss',
            'verbose': -1
        }
        lgb_train = lgb.Dataset(X_train, y_train)
        lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
        evals_result = {}
        gbm = lgb.train(params, lgb_train,
                        num_boost_round=50,
                        valid_sets=lgb_eval,
                        verbose_eval=False,
                        evals_result=evals_result)
        ret = log_loss(y_test, gbm.predict(X_test))
        self.assertLess(ret, 0.19)
        self.assertAlmostEqual(evals_result['valid_0']['binary_logloss'][-1], ret, places=5)

    def test_regression(self):
        X, y = load_boston(True)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
        params = {
            'metric': 'l2',
            'verbose': -1
        }
        lgb_train = lgb.Dataset(X_train, y_train)
        lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
        evals_result = {}
        gbm = lgb.train(params, lgb_train,
                        num_boost_round=50,
                        valid_sets=lgb_eval,
                        verbose_eval=False,
                        evals_result=evals_result)
        ret = mean_squared_error(y_test, gbm.predict(X_test))
        self.assertLess(ret, 7)
        self.assertAlmostEqual(evals_result['valid_0']['l2'][-1], ret, places=5)

    def test_missing_value_handle(self):
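        # The only signal is whether the single feature is NaN, so correct missing-value handling
        # should drive the training error close to zero.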
        X_train = np.zeros((100, 1))
        y_train = np.zeros(100)
        trues = random.sample(range(100), 20)
        for idx in trues:
            X_train[idx, 0] = np.nan
            y_train[idx] = 1
        lgb_train = lgb.Dataset(X_train, y_train)
        lgb_eval = lgb.Dataset(X_train, y_train)

        params = {
            'metric': 'l2',
            'verbose': -1,
            'boost_from_average': False
        }
        evals_result = {}
        gbm = lgb.train(params, lgb_train,
                        num_boost_round=20,
                        valid_sets=lgb_eval,
                        verbose_eval=False,
                        evals_result=evals_result)
        ret = mean_squared_error(y_train, gbm.predict(X_train))
        self.assertLess(ret, 0.005)
        self.assertAlmostEqual(evals_result['valid_0']['l2'][-1], ret, places=5)

    def test_missing_value_handle_more_na(self):
        X_train = np.ones((100, 1))
        y_train = np.ones(100)
        trues = random.sample(range(100), 80)
        for idx in trues:
            X_train[idx, 0] = np.nan
            y_train[idx] = 0
        lgb_train = lgb.Dataset(X_train, y_train)
        lgb_eval = lgb.Dataset(X_train, y_train)

        params = {
            'metric': 'l2',
            'verbose': -1,
            'boost_from_average': False
        }
        evals_result = {}
        gbm = lgb.train(params, lgb_train,
                        num_boost_round=20,
                        valid_sets=lgb_eval,
                        verbose_eval=False,
                        evals_result=evals_result)
        ret = mean_squared_error(y_train, gbm.predict(X_train))
        self.assertLess(ret, 0.005)
        self.assertAlmostEqual(evals_result['valid_0']['l2'][-1], ret, places=5)

    def test_missing_value_handle_na(self):
        x = [0, 1, 2, 3, 4, 5, 6, 7, np.nan]
        y = [1, 1, 1, 1, 0, 0, 0, 0, 1]

        X_train = np.array(x).reshape(len(x), 1)
        y_train = np.array(y)
        lgb_train = lgb.Dataset(X_train, y_train)
        lgb_eval = lgb.Dataset(X_train, y_train)

        params = {
            'objective': 'regression',
            'metric': 'auc',
            'verbose': -1,
            'boost_from_average': False,
            'min_data': 1,
            'num_leaves': 2,
            'learning_rate': 1,
            'min_data_in_bin': 1,
            'zero_as_missing': False
        }
        evals_result = {}
        gbm = lgb.train(params, lgb_train,
                        num_boost_round=1,
                        valid_sets=lgb_eval,
                        verbose_eval=False,
                        evals_result=evals_result)
        pred = gbm.predict(X_train)
        np.testing.assert_allclose(pred, y)
        ret = roc_auc_score(y_train, pred)
        self.assertGreater(ret, 0.999)
        self.assertAlmostEqual(evals_result['valid_0']['auc'][-1], ret, places=5)

    def test_missing_value_handle_zero(self):
        x = [0, 1, 2, 3, 4, 5, 6, 7, np.nan]
        y = [0, 1, 1, 1, 0, 0, 0, 0, 0]

        X_train = np.array(x).reshape(len(x), 1)
        y_train = np.array(y)
        lgb_train = lgb.Dataset(X_train, y_train)
        lgb_eval = lgb.Dataset(X_train, y_train)

        params = {
            'objective': 'regression',
            'metric': 'auc',
            'verbose': -1,
            'boost_from_average': False,
            'min_data': 1,
            'num_leaves': 2,
            'learning_rate': 1,
            'min_data_in_bin': 1,
            'zero_as_missing': True
        }
        evals_result = {}
        gbm = lgb.train(params, lgb_train,
                        num_boost_round=1,
                        valid_sets=lgb_eval,
                        verbose_eval=False,
                        evals_result=evals_result)
        pred = gbm.predict(X_train)
        np.testing.assert_allclose(pred, y)
        ret = roc_auc_score(y_train, pred)
        self.assertGreater(ret, 0.999)
        self.assertAlmostEqual(evals_result['valid_0']['auc'][-1], ret, places=5)

    def test_missing_value_handle_none(self):
        x = [0, 1, 2, 3, 4, 5, 6, 7, np.nan]
        y = [0, 1, 1, 1, 0, 0, 0, 0, 0]

        X_train = np.array(x).reshape(len(x), 1)
        y_train = np.array(y)
        lgb_train = lgb.Dataset(X_train, y_train)
        lgb_eval = lgb.Dataset(X_train, y_train)

        params = {
            'objective': 'regression',
            'metric': 'auc',
            'verbose': -1,
            'boost_from_average': False,
            'min_data': 1,
            'num_leaves': 2,
            'learning_rate': 1,
            'min_data_in_bin': 1,
            'use_missing': False
        }
        evals_result = {}
        gbm = lgb.train(params, lgb_train,
                        num_boost_round=1,
                        valid_sets=lgb_eval,
                        verbose_eval=False,
                        evals_result=evals_result)
        pred = gbm.predict(X_train)
        self.assertAlmostEqual(pred[0], pred[1])
        self.assertAlmostEqual(pred[-1], pred[0])
        ret = roc_auc_score(y_train, pred)
        self.assertGreater(ret, 0.83)
        self.assertAlmostEqual(evals_result['valid_0']['auc'][-1], ret, places=5)

    def test_categorical_handle(self):
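        # With the data constraints relaxed (min_data=1, min_data_in_bin=1, cat_l2=0), one tree over
        # a single categorical column should reproduce the labels exactly.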
        x = [0, 1, 2, 3, 4, 5, 6, 7]
        y = [0, 1, 0, 1, 0, 1, 0, 1]

        X_train = np.array(x).reshape(len(x), 1)
        y_train = np.array(y)
        lgb_train = lgb.Dataset(X_train, y_train)
        lgb_eval = lgb.Dataset(X_train, y_train)

        params = {
            'objective': 'regression',
            'metric': 'auc',
            'verbose': -1,
            'boost_from_average': False,
            'min_data': 1,
            'num_leaves': 2,
            'learning_rate': 1,
            'min_data_in_bin': 1,
            'min_data_per_group': 1,
            'cat_smooth': 1,
            'cat_l2': 0,
            'max_cat_to_onehot': 1,
            'zero_as_missing': True,
            'categorical_column': 0
        }
        evals_result = {}
        gbm = lgb.train(params, lgb_train,
                        num_boost_round=1,
                        valid_sets=lgb_eval,
                        verbose_eval=False,
                        evals_result=evals_result)
        pred = gbm.predict(X_train)
        np.testing.assert_allclose(pred, y)
        ret = roc_auc_score(y_train, pred)
        self.assertGreater(ret, 0.999)
        self.assertAlmostEqual(evals_result['valid_0']['auc'][-1], ret, places=5)

    def test_categorical_handle_na(self):
        x = [0, np.nan, 0, np.nan, 0, np.nan]
        y = [0, 1, 0, 1, 0, 1]

        X_train = np.array(x).reshape(len(x), 1)
        y_train = np.array(y)
        lgb_train = lgb.Dataset(X_train, y_train)
        lgb_eval = lgb.Dataset(X_train, y_train)

        params = {
            'objective': 'regression',
            'metric': 'auc',
            'verbose': -1,
            'boost_from_average': False,
            'min_data': 1,
            'num_leaves': 2,
            'learning_rate': 1,
            'min_data_in_bin': 1,
            'min_data_per_group': 1,
            'cat_smooth': 1,
            'cat_l2': 0,
            'max_cat_to_onehot': 1,
            'zero_as_missing': False,
            'categorical_column': 0
        }
        evals_result = {}
        gbm = lgb.train(params, lgb_train,
                        num_boost_round=1,
                        valid_sets=lgb_eval,
                        verbose_eval=False,
                        evals_result=evals_result)
        pred = gbm.predict(X_train)
        np.testing.assert_allclose(pred, y)
        ret = roc_auc_score(y_train, pred)
        self.assertGreater(ret, 0.999)
        self.assertAlmostEqual(evals_result['valid_0']['auc'][-1], ret, places=5)

    def test_categorical_non_zero_inputs(self):
        x = [1, 1, 1, 1, 1, 1, 2, 2]
        y = [1, 1, 1, 1, 1, 1, 0, 0]

        X_train = np.array(x).reshape(len(x), 1)
        y_train = np.array(y)
        lgb_train = lgb.Dataset(X_train, y_train)
        lgb_eval = lgb.Dataset(X_train, y_train)

        params = {
            'objective': 'regression',
            'metric': 'auc',
            'verbose': -1,
            'boost_from_average': False,
            'min_data': 1,
            'num_leaves': 2,
            'learning_rate': 1,
            'min_data_in_bin': 1,
            'min_data_per_group': 1,
            'cat_smooth': 1,
            'cat_l2': 0,
            'max_cat_to_onehot': 1,
            'zero_as_missing': False,
            'categorical_column': 0
        }
        evals_result = {}
        gbm = lgb.train(params, lgb_train,
                        num_boost_round=1,
                        valid_sets=lgb_eval,
                        verbose_eval=False,
                        evals_result=evals_result)
        pred = gbm.predict(X_train)
        np.testing.assert_allclose(pred, y)
        ret = roc_auc_score(y_train, pred)
        self.assertGreater(ret, 0.999)
        self.assertAlmostEqual(evals_result['valid_0']['auc'][-1], ret, places=5)

    def test_multiclass(self):
        X, y = load_digits(10, True)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
        params = {
            'objective': 'multiclass',
            'metric': 'multi_logloss',
            'num_class': 10,
            'verbose': -1
        }
        lgb_train = lgb.Dataset(X_train, y_train, params=params)
        lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, params=params)
        evals_result = {}
        gbm = lgb.train(params, lgb_train,
                        num_boost_round=50,
                        valid_sets=lgb_eval,
                        verbose_eval=False,
                        evals_result=evals_result)
        ret = multi_logloss(y_test, gbm.predict(X_test))
        self.assertLess(ret, 0.16)
        self.assertAlmostEqual(evals_result['valid_0']['multi_logloss'][-1], ret, places=5)

    def test_multiclass_rf(self):
        X, y = load_digits(10, True)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
        params = {
            'boosting_type': 'rf',
            'objective': 'multiclass',
            'metric': 'multi_logloss',
            'bagging_freq': 1,
            'bagging_fraction': 0.6,
            'feature_fraction': 0.6,
            'num_class': 10,
            'num_leaves': 50,
            'min_data': 1,
            'verbose': -1,
            'gpu_use_dp': True
        }
        lgb_train = lgb.Dataset(X_train, y_train, params=params)
        lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, params=params)
        evals_result = {}
        gbm = lgb.train(params, lgb_train,
                        num_boost_round=50,
                        valid_sets=lgb_eval,
                        verbose_eval=False,
                        evals_result=evals_result)
        ret = multi_logloss(y_test, gbm.predict(X_test))
        self.assertLess(ret, 0.23)
        self.assertAlmostEqual(evals_result['valid_0']['multi_logloss'][-1], ret, places=5)

    def test_multiclass_prediction_early_stopping(self):
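        # Prediction-time early stopping: a small pred_early_stop_margin degrades multiclass log-loss
        # noticeably, while a large margin nearly matches the full prediction.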
        X, y = load_digits(10, True)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
        params = {
            'objective': 'multiclass',
            'metric': 'multi_logloss',
            'num_class': 10,
            'verbose': -1
        }
        lgb_train = lgb.Dataset(X_train, y_train, params=params)
        gbm = lgb.train(params, lgb_train,
                        num_boost_round=50)

        pred_parameter = {"pred_early_stop": True,
                          "pred_early_stop_freq": 5,
                          "pred_early_stop_margin": 1.5}
        ret = multi_logloss(y_test, gbm.predict(X_test, **pred_parameter))
        self.assertLess(ret, 0.8)
        self.assertGreater(ret, 0.6)  # loss will be higher than when evaluating the full model

        pred_parameter = {"pred_early_stop": True,
                          "pred_early_stop_freq": 5,
                          "pred_early_stop_margin": 5.5}
        ret = multi_logloss(y_test, gbm.predict(X_test, **pred_parameter))
        self.assertLess(ret, 0.2)

    def test_multi_class_error(self):
        X, y = load_digits(10, True)
        params = {'objective': 'multiclass', 'num_classes': 10, 'metric': 'multi_error',
                  'num_leaves': 4, 'verbose': -1}
        lgb_data = lgb.Dataset(X, label=y)
        est = lgb.train(params, lgb_data, num_boost_round=10)
        predict_default = est.predict(X)
        results = {}
        est = lgb.train(dict(params, multi_error_top_k=1), lgb_data, num_boost_round=10,
                        valid_sets=[lgb_data], evals_result=results, verbose_eval=False)
        predict_1 = est.predict(X)
        # check that default gives same result as k = 1
        np.testing.assert_allclose(predict_1, predict_default)
        # check against independent calculation for k = 1
        err = top_k_error(y, predict_1, 1)
        self.assertAlmostEqual(results['training']['multi_error'][-1], err)
        # check against independent calculation for k = 2
        results = {}
        est = lgb.train(dict(params, multi_error_top_k=2), lgb_data, num_boost_round=10,
                        valid_sets=[lgb_data], evals_result=results, verbose_eval=False)
        predict_2 = est.predict(X)
        err = top_k_error(y, predict_2, 2)
        self.assertAlmostEqual(results['training']['multi_error@2'][-1], err)
        # check against independent calculation for k = 10
        results = {}
        est = lgb.train(dict(params, multi_error_top_k=10), lgb_data, num_boost_round=10,
                        valid_sets=[lgb_data], evals_result=results, verbose_eval=False)
        predict_3 = est.predict(X)
        err = top_k_error(y, predict_3, 10)
        self.assertAlmostEqual(results['training']['multi_error@10'][-1], err)
        # check cases where predictions are equal
        X = np.array([[0, 0], [0, 0]])
        y = np.array([0, 1])
        lgb_data = lgb.Dataset(X, label=y)
        params['num_classes'] = 2
        results = {}
        lgb.train(params, lgb_data, num_boost_round=10,
                  valid_sets=[lgb_data], evals_result=results, verbose_eval=False)
        self.assertAlmostEqual(results['training']['multi_error'][-1], 1)
        results = {}
        lgb.train(dict(params, multi_error_top_k=2), lgb_data, num_boost_round=10,
                  valid_sets=[lgb_data], evals_result=results, verbose_eval=False)
        self.assertAlmostEqual(results['training']['multi_error@2'][-1], 0)

    def test_auc_mu(self):
        # should give same result as binary auc for 2 classes
        X, y = load_digits(10, True)
        y_new = np.zeros((len(y)))
        y_new[y != 0] = 1
        lgb_X = lgb.Dataset(X, label=y_new)
        params = {'objective': 'multiclass',
                  'metric': 'auc_mu',
                  'verbose': -1,
                  'num_classes': 2,
                  'seed': 0}
        results_auc_mu = {}
        lgb.train(params, lgb_X, num_boost_round=10, valid_sets=[lgb_X], evals_result=results_auc_mu)
        params = {'objective': 'binary',
                  'metric': 'auc',
                  'verbose': -1,
                  'seed': 0}
        results_auc = {}
        lgb.train(params, lgb_X, num_boost_round=10, valid_sets=[lgb_X], evals_result=results_auc)
        np.testing.assert_allclose(results_auc_mu['training']['auc_mu'], results_auc['training']['auc'])
        # test the case where all predictions are equal
        lgb_X = lgb.Dataset(X[:10], label=y_new[:10])
        params = {'objective': 'multiclass',
                  'metric': 'auc_mu',
                  'verbose': -1,
                  'num_classes': 2,
                  'min_data_in_leaf': 20,
                  'seed': 0}
        results_auc_mu = {}
        lgb.train(params, lgb_X, num_boost_round=10, valid_sets=[lgb_X], evals_result=results_auc_mu)
        self.assertAlmostEqual(results_auc_mu['training']['auc_mu'][-1], 0.5)
        # should give 1 when accuracy = 1
        X = X[:10, :]
        y = y[:10]
        lgb_X = lgb.Dataset(X, label=y)
        params = {'objective': 'multiclass',
                  'metric': 'auc_mu',
                  'num_classes': 10,
                  'min_data_in_leaf': 1,
                  'verbose': -1}
        results = {}
        lgb.train(params, lgb_X, num_boost_round=100, valid_sets=[lgb_X], evals_result=results)
        self.assertAlmostEqual(results['training']['auc_mu'][-1], 1)
        # test loading weights
        Xy = np.loadtxt(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                     '../../examples/multiclass_classification/multiclass.train'))
        y = Xy[:, 0]
        X = Xy[:, 1:]
        lgb_X = lgb.Dataset(X, label=y)
        params = {'objective': 'multiclass',
                  'metric': 'auc_mu',
                  'auc_mu_weights': [0, 2, 2, 2, 2, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0],
                  'num_classes': 5,
                  'verbose': -1,
                  'seed': 0}
        results_weight = {}
        lgb.train(params, lgb_X, num_boost_round=5, valid_sets=[lgb_X], evals_result=results_weight)
        params['auc_mu_weights'] = []
        results_no_weight = {}
        lgb.train(params, lgb_X, num_boost_round=5, valid_sets=[lgb_X], evals_result=results_no_weight)
        self.assertNotEqual(results_weight['training']['auc_mu'][-1], results_no_weight['training']['auc_mu'][-1])

    def test_early_stopping(self):
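        # 10 rounds should finish without triggering early stopping; with 40 rounds and
        # early_stopping_rounds=5 the best iteration should come before the final round.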
        X, y = load_breast_cancer(True)
        params = {
            'objective': 'binary',
            'metric': 'binary_logloss',
            'verbose': -1
        }
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
        lgb_train = lgb.Dataset(X_train, y_train)
        lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
        valid_set_name = 'valid_set'
        # no early stopping
        gbm = lgb.train(params, lgb_train,
                        num_boost_round=10,
                        valid_sets=lgb_eval,
                        valid_names=valid_set_name,
                        verbose_eval=False,
                        early_stopping_rounds=5)
        self.assertEqual(gbm.best_iteration, 10)
        self.assertIn(valid_set_name, gbm.best_score)
        self.assertIn('binary_logloss', gbm.best_score[valid_set_name])
        # early stopping occurs
        gbm = lgb.train(params, lgb_train,
                        num_boost_round=40,
                        valid_sets=lgb_eval,
                        valid_names=valid_set_name,
                        verbose_eval=False,
                        early_stopping_rounds=5)
        self.assertLessEqual(gbm.best_iteration, 39)
        self.assertIn(valid_set_name, gbm.best_score)
        self.assertIn('binary_logloss', gbm.best_score[valid_set_name])

    def test_continue_train(self):
        X, y = load_boston(True)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
        params = {
            'objective': 'regression',
            'metric': 'l1',
            'verbose': -1
        }
        lgb_train = lgb.Dataset(X_train, y_train, free_raw_data=False)
        lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, free_raw_data=False)
        init_gbm = lgb.train(params, lgb_train, num_boost_round=20)
        model_name = 'model.txt'
        init_gbm.save_model(model_name)
        evals_result = {}
        gbm = lgb.train(params, lgb_train,
                        num_boost_round=30,
                        valid_sets=lgb_eval,
                        verbose_eval=False,
                        # test custom eval metrics
                        feval=(lambda p, d: ('custom_mae', mean_absolute_error(p, d.get_label()), False)),
                        evals_result=evals_result,
                        init_model='model.txt')
        ret = mean_absolute_error(y_test, gbm.predict(X_test))
        self.assertLess(ret, 2.0)
        self.assertAlmostEqual(evals_result['valid_0']['l1'][-1], ret, places=5)
        np.testing.assert_allclose(evals_result['valid_0']['l1'], evals_result['valid_0']['custom_mae'])
        os.remove(model_name)

    def test_continue_train_reused_dataset(self):
        X, y = load_boston(True)
        params = {
            'objective': 'regression',
            'verbose': -1
        }
        lgb_train = lgb.Dataset(X, y, free_raw_data=False)
        init_gbm = lgb.train(params, lgb_train, num_boost_round=5)
        init_gbm_2 = lgb.train(params, lgb_train, num_boost_round=5, init_model=init_gbm)
        init_gbm_3 = lgb.train(params, lgb_train, num_boost_round=5, init_model=init_gbm_2)
        gbm = lgb.train(params, lgb_train, num_boost_round=5, init_model=init_gbm_3)
        self.assertEqual(gbm.current_iteration(), 20)

    def test_continue_train_dart(self):
        X, y = load_boston(True)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
        params = {
            'boosting_type': 'dart',
            'objective': 'regression',
            'metric': 'l1',
            'verbose': -1
        }
        lgb_train = lgb.Dataset(X_train, y_train, free_raw_data=False)
        lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, free_raw_data=False)
        init_gbm = lgb.train(params, lgb_train, num_boost_round=50)
        evals_result = {}
        gbm = lgb.train(params, lgb_train,
                        num_boost_round=50,
                        valid_sets=lgb_eval,
                        verbose_eval=False,
                        evals_result=evals_result,
                        init_model=init_gbm)
        ret = mean_absolute_error(y_test, gbm.predict(X_test))
        self.assertLess(ret, 2.0)
        self.assertAlmostEqual(evals_result['valid_0']['l1'][-1], ret, places=5)

    def test_continue_train_multiclass(self):
        X, y = load_iris(True)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
        params = {
            'objective': 'multiclass',
            'metric': 'multi_logloss',
            'num_class': 3,
            'verbose': -1
        }
        lgb_train = lgb.Dataset(X_train, y_train, params=params, free_raw_data=False)
        lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, params=params, free_raw_data=False)
        init_gbm = lgb.train(params, lgb_train, num_boost_round=20)
        evals_result = {}
        gbm = lgb.train(params, lgb_train,
                        num_boost_round=30,
                        valid_sets=lgb_eval,
                        verbose_eval=False,
                        evals_result=evals_result,
                        init_model=init_gbm)
        ret = multi_logloss(y_test, gbm.predict(X_test))
        self.assertLess(ret, 0.1)
        self.assertAlmostEqual(evals_result['valid_0']['multi_logloss'][-1], ret, places=5)

    def test_cv(self):
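        # Exercises lgb.cv: metric overrides, shuffling, callbacks, train-set metrics,
        # generator vs. splitter objects as folds, and lambdarank with query groups.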
        X_train, y_train = load_boston(True)
        params = {'verbose': -1}
        lgb_train = lgb.Dataset(X_train, y_train)
        # shuffle = False, override metric in params
        params_with_metric = {'metric': 'l2', 'verbose': -1}
        cv_res = lgb.cv(params_with_metric, lgb_train, num_boost_round=10,
                        nfold=3, stratified=False, shuffle=False,
                        metrics='l1', verbose_eval=False)
        self.assertIn('l1-mean', cv_res)
        self.assertNotIn('l2-mean', cv_res)
        self.assertEqual(len(cv_res['l1-mean']), 10)
        # shuffle = True, callbacks
        cv_res = lgb.cv(params, lgb_train, num_boost_round=10, nfold=3, stratified=False, shuffle=True,
                        metrics='l1', verbose_eval=False,
                        callbacks=[lgb.reset_parameter(learning_rate=lambda i: 0.1 - 0.001 * i)])
        self.assertIn('l1-mean', cv_res)
        self.assertEqual(len(cv_res['l1-mean']), 10)
        # enable display training loss
        cv_res = lgb.cv(params_with_metric, lgb_train, num_boost_round=10,
                        nfold=3, stratified=False, shuffle=False,
                        metrics='l1', verbose_eval=False, eval_train_metric=True)
        self.assertIn('train l1-mean', cv_res)
        self.assertIn('valid l1-mean', cv_res)
        self.assertNotIn('train l2-mean', cv_res)
        self.assertNotIn('valid l2-mean', cv_res)
        self.assertEqual(len(cv_res['train l1-mean']), 10)
        self.assertEqual(len(cv_res['valid l1-mean']), 10)
        # self-defined folds
        tss = TimeSeriesSplit(3)
        folds = tss.split(X_train)
        cv_res_gen = lgb.cv(params_with_metric, lgb_train, num_boost_round=10, folds=folds,
                            verbose_eval=False)
        cv_res_obj = lgb.cv(params_with_metric, lgb_train, num_boost_round=10, folds=tss,
                            verbose_eval=False)
        np.testing.assert_allclose(cv_res_gen['l2-mean'], cv_res_obj['l2-mean'])
        # lambdarank
        X_train, y_train = load_svmlight_file(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                                           '../../examples/lambdarank/rank.train'))
        q_train = np.loadtxt(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                          '../../examples/lambdarank/rank.train.query'))
        params_lambdarank = {'objective': 'lambdarank', 'verbose': -1, 'eval_at': 3}
        lgb_train = lgb.Dataset(X_train, y_train, group=q_train)
        # ... with l2 metric
        cv_res_lambda = lgb.cv(params_lambdarank, lgb_train, num_boost_round=10, nfold=3,
                               metrics='l2', verbose_eval=False)
        self.assertEqual(len(cv_res_lambda), 2)
        self.assertFalse(np.isnan(cv_res_lambda['l2-mean']).any())
        # ... with NDCG (default) metric
        cv_res_lambda = lgb.cv(params_lambdarank, lgb_train, num_boost_round=10, nfold=3,
                               verbose_eval=False)
        self.assertEqual(len(cv_res_lambda), 2)
        self.assertFalse(np.isnan(cv_res_lambda['ndcg@3-mean']).any())
        # self-defined folds with lambdarank
        cv_res_lambda_obj = lgb.cv(params_lambdarank, lgb_train, num_boost_round=10,
                                   folds=GroupKFold(n_splits=3),
                                   verbose_eval=False)
        np.testing.assert_allclose(cv_res_lambda['ndcg@3-mean'], cv_res_lambda_obj['ndcg@3-mean'])

    def test_feature_name(self):
        X_train, y_train = load_boston(True)
        params = {'verbose': -1}
        lgb_train = lgb.Dataset(X_train, y_train)
        feature_names = ['f_' + str(i) for i in range(X_train.shape[-1])]
        gbm = lgb.train(params, lgb_train, num_boost_round=5, feature_name=feature_names)
        self.assertListEqual(feature_names, gbm.feature_name())
        # test feature_names with whitespaces
        feature_names_with_space = ['f ' + str(i) for i in range(X_train.shape[-1])]
        gbm = lgb.train(params, lgb_train, num_boost_round=5, feature_name=feature_names_with_space)
        self.assertListEqual(feature_names, gbm.feature_name())

    def test_feature_name_with_non_ascii(self):
        X_train = np.random.normal(size=(100, 4))
        y_train = np.random.random(100)
        # This has non-ascii strings.
        feature_names = [u'F_零', u'F_一', u'F_二', u'F_三']
        params = {'verbose': -1}
        lgb_train = lgb.Dataset(X_train, y_train)

        gbm = lgb.train(params, lgb_train, num_boost_round=5, feature_name=feature_names)
        self.assertListEqual(feature_names, gbm.feature_name())
        gbm.save_model('lgb.model')

        gbm2 = lgb.Booster(model_file='lgb.model')
        self.assertListEqual(feature_names, gbm2.feature_name())

    def test_save_load_copy_pickle(self):
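        # Continuing training from a saved model file, a reloaded Booster, copies, and pickle
        # round-trips should all match continuing from the in-memory model.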
        def train_and_predict(init_model=None, return_model=False):
            X, y = load_boston(True)
            X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
            params = {
                'objective': 'regression',
                'metric': 'l2',
                'verbose': -1
            }
            lgb_train = lgb.Dataset(X_train, y_train)
            gbm_template = lgb.train(params, lgb_train, num_boost_round=10, init_model=init_model)
            return gbm_template if return_model else mean_squared_error(y_test, gbm_template.predict(X_test))

        gbm = train_and_predict(return_model=True)
        ret_origin = train_and_predict(init_model=gbm)
        other_ret = []
        gbm.save_model('lgb.model')
        with open('lgb.model') as f:  # check all params are logged into model file correctly
            self.assertNotEqual(f.read().find("[num_iterations: 10]"), -1)
        other_ret.append(train_and_predict(init_model='lgb.model'))
        gbm_load = lgb.Booster(model_file='lgb.model')
        other_ret.append(train_and_predict(init_model=gbm_load))
        other_ret.append(train_and_predict(init_model=copy.copy(gbm)))
        other_ret.append(train_and_predict(init_model=copy.deepcopy(gbm)))
        with open('lgb.pkl', 'wb') as f:
            pickle.dump(gbm, f)
        with open('lgb.pkl', 'rb') as f:
            gbm_pickle = pickle.load(f)
        other_ret.append(train_and_predict(init_model=gbm_pickle))
        gbm_pickles = pickle.loads(pickle.dumps(gbm))
        other_ret.append(train_and_predict(init_model=gbm_pickles))
        for ret in other_ret:
            self.assertAlmostEqual(ret_origin, ret, places=5)

    @unittest.skipIf(not lgb.compat.PANDAS_INSTALLED, 'pandas is not installed')
    def test_pandas_categorical(self):
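        # categorical_feature can be column indices, column names, or 'auto' (pandas category dtype);
        # predictions should agree whenever the same columns end up treated as categorical.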
        import pandas as pd
        np.random.seed(42)  # sometimes there is no difference in how the columns are treated (categorical or not)
        X = pd.DataFrame({"A": np.random.permutation(['a', 'b', 'c', 'd'] * 75),  # str
                          "B": np.random.permutation([1, 2, 3] * 100),  # int
                          "C": np.random.permutation([0.1, 0.2, -0.1, -0.1, 0.2] * 60),  # float
                          "D": np.random.permutation([True, False] * 150),  # bool
                          "E": pd.Categorical(np.random.permutation(['z', 'y', 'x', 'w', 'v'] * 60),
                                              ordered=True)})  # str and ordered categorical
        y = np.random.permutation([0, 1] * 150)
        X_test = pd.DataFrame({"A": np.random.permutation(['a', 'b', 'e'] * 20),  # unseen category
                               "B": np.random.permutation([1, 3] * 30),
                               "C": np.random.permutation([0.1, -0.1, 0.2, 0.2] * 15),
                               "D": np.random.permutation([True, False] * 30),
                               "E": pd.Categorical(np.random.permutation(['z', 'y'] * 30),
                                                   ordered=True)})
        np.random.seed()  # reset seed
        cat_cols_actual = ["A", "B", "C", "D"]
        cat_cols_to_store = cat_cols_actual + ["E"]
        X[cat_cols_actual] = X[cat_cols_actual].astype('category')
        X_test[cat_cols_actual] = X_test[cat_cols_actual].astype('category')
        cat_values = [X[col].cat.categories.tolist() for col in cat_cols_to_store]
        params = {
            'objective': 'binary',
            'metric': 'binary_logloss',
            'verbose': -1
        }
        lgb_train = lgb.Dataset(X, y)
        gbm0 = lgb.train(params, lgb_train, num_boost_round=10)
        pred0 = gbm0.predict(X_test)
        self.assertEqual(lgb_train.categorical_feature, 'auto')
        lgb_train = lgb.Dataset(X, pd.DataFrame(y))  # also test that label can be one-column pd.DataFrame
        gbm1 = lgb.train(params, lgb_train, num_boost_round=10, categorical_feature=[0])
        pred1 = gbm1.predict(X_test)
        self.assertListEqual(lgb_train.categorical_feature, [0])
        lgb_train = lgb.Dataset(X, pd.Series(y))  # also test that label can be pd.Series
        gbm2 = lgb.train(params, lgb_train, num_boost_round=10, categorical_feature=['A'])
        pred2 = gbm2.predict(X_test)
        self.assertListEqual(lgb_train.categorical_feature, ['A'])
        lgb_train = lgb.Dataset(X, y)
        gbm3 = lgb.train(params, lgb_train, num_boost_round=10, categorical_feature=['A', 'B', 'C', 'D'])
        pred3 = gbm3.predict(X_test)
        self.assertListEqual(lgb_train.categorical_feature, ['A', 'B', 'C', 'D'])
        gbm3.save_model('categorical.model')
        gbm4 = lgb.Booster(model_file='categorical.model')
        pred4 = gbm4.predict(X_test)
        model_str = gbm4.model_to_string()
        gbm4.model_from_string(model_str, False)
        pred5 = gbm4.predict(X_test)
        gbm5 = lgb.Booster(model_str=model_str)
        pred6 = gbm5.predict(X_test)
        lgb_train = lgb.Dataset(X, y)
        gbm6 = lgb.train(params, lgb_train, num_boost_round=10, categorical_feature=['A', 'B', 'C', 'D', 'E'])
        pred7 = gbm6.predict(X_test)
        self.assertListEqual(lgb_train.categorical_feature, ['A', 'B', 'C', 'D', 'E'])
        lgb_train = lgb.Dataset(X, y)
        gbm7 = lgb.train(params, lgb_train, num_boost_round=10, categorical_feature=[])
        pred8 = gbm7.predict(X_test)
        self.assertListEqual(lgb_train.categorical_feature, [])
        self.assertRaises(AssertionError,
                          np.testing.assert_allclose,
                          pred0, pred1)
        self.assertRaises(AssertionError,
                          np.testing.assert_allclose,
                          pred0, pred2)
        np.testing.assert_allclose(pred1, pred2)
        np.testing.assert_allclose(pred0, pred3)
        np.testing.assert_allclose(pred0, pred4)
        np.testing.assert_allclose(pred0, pred5)
        np.testing.assert_allclose(pred0, pred6)
        self.assertRaises(AssertionError,
                          np.testing.assert_allclose,
                          pred0, pred7)  # ordered cat features aren't treated as cat features by default
        self.assertRaises(AssertionError,
                          np.testing.assert_allclose,
                          pred0, pred8)
        self.assertListEqual(gbm0.pandas_categorical, cat_values)
        self.assertListEqual(gbm1.pandas_categorical, cat_values)
        self.assertListEqual(gbm2.pandas_categorical, cat_values)
        self.assertListEqual(gbm3.pandas_categorical, cat_values)
        self.assertListEqual(gbm4.pandas_categorical, cat_values)
        self.assertListEqual(gbm5.pandas_categorical, cat_values)
        self.assertListEqual(gbm6.pandas_categorical, cat_values)
        self.assertListEqual(gbm7.pandas_categorical, cat_values)

    @unittest.skipIf(not lgb.compat.PANDAS_INSTALLED, 'pandas is not installed')
    def test_pandas_sparse(self):
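        # Predictions on SparseArray-backed pandas inputs should match predictions on their densified equivalents.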
        import pandas as pd
        try:
            from pandas.arrays import SparseArray
        except ImportError:  # support old versions
            from pandas import SparseArray
        X = pd.DataFrame({"A": SparseArray(np.random.permutation([0, 1, 2] * 100)),
                          "B": SparseArray(np.random.permutation([0.0, 0.1, 0.2, -0.1, 0.2] * 60)),
                          "C": SparseArray(np.random.permutation([True, False] * 150))})
        y = pd.Series(SparseArray(np.random.permutation([0, 1] * 150)))
        X_test = pd.DataFrame({"A": SparseArray(np.random.permutation([0, 2] * 30)),
                               "B": SparseArray(np.random.permutation([0.0, 0.1, 0.2, -0.1] * 15)),
                               "C": SparseArray(np.random.permutation([True, False] * 30))})
        if pd.__version__ >= '0.24.0':
            for dtype in pd.concat([X.dtypes, X_test.dtypes, pd.Series(y.dtypes)]):
                self.assertTrue(pd.api.types.is_sparse(dtype))
        params = {
            'objective': 'binary',
            'verbose': -1
        }
        lgb_train = lgb.Dataset(X, y)
        gbm = lgb.train(params, lgb_train, num_boost_round=10)
        pred_sparse = gbm.predict(X_test, raw_score=True)
        if hasattr(X_test, 'sparse'):
            pred_dense = gbm.predict(X_test.sparse.to_dense(), raw_score=True)
        else:
            pred_dense = gbm.predict(X_test.to_dense(), raw_score=True)
        np.testing.assert_allclose(pred_sparse, pred_dense)

    def test_reference_chain(self):
        X = np.random.normal(size=(100, 2))
        y = np.random.normal(size=100)
        tmp_dat = lgb.Dataset(X, y)
        # take subsets and train
        tmp_dat_train = tmp_dat.subset(np.arange(80))
        tmp_dat_val = tmp_dat.subset(np.arange(80, 100)).subset(np.arange(18))
        params = {'objective': 'regression_l2', 'metric': 'rmse'}
        evals_result = {}
        gbm = lgb.train(params, tmp_dat_train, num_boost_round=20,
                        valid_sets=[tmp_dat_train, tmp_dat_val],
                        verbose_eval=False, evals_result=evals_result)
        self.assertEqual(len(evals_result['training']['rmse']), 20)
        self.assertEqual(len(evals_result['valid_1']['rmse']), 20)

    def test_contribs(self):
        X, y = load_breast_cancer(True)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
        params = {
            'objective': 'binary',
            'metric': 'binary_logloss',
            'verbose': -1,
        }
        lgb_train = lgb.Dataset(X_train, y_train)
        gbm = lgb.train(params, lgb_train, num_boost_round=20)

        self.assertLess(np.linalg.norm(gbm.predict(X_test, raw_score=True)
                                       - np.sum(gbm.predict(X_test, pred_contrib=True), axis=1)), 1e-4)

    def test_contribs_sparse(self):
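        # pred_contrib on sparse input should return a sparse matrix in the matching format (CSR/CSC)
        # whose values equal the dense-input contributions.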
        n_features = 20
        n_samples = 100
        # generate CSR sparse dataset
        X, y = make_multilabel_classification(n_samples=n_samples,
                                              sparse=True,
                                              n_features=n_features,
                                              n_classes=1,
                                              n_labels=2)
        y = y.flatten()
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
        params = {
            'objective': 'binary',
            'verbose': -1,
        }
        lgb_train = lgb.Dataset(X_train, y_train)
        gbm = lgb.train(params, lgb_train, num_boost_round=20)
        contribs_csr = gbm.predict(X_test, pred_contrib=True)
        self.assertTrue(isspmatrix_csr(contribs_csr))
        # convert data to dense and get back same contribs
        contribs_dense = gbm.predict(X_test.toarray(), pred_contrib=True)
        # validate the values are the same
        np.testing.assert_allclose(contribs_csr.toarray(), contribs_dense)
        self.assertLess(np.linalg.norm(gbm.predict(X_test, raw_score=True)
                                       - np.sum(contribs_dense, axis=1)), 1e-4)
        # validate using CSC matrix
        X_test_csc = X_test.tocsc()
        contribs_csc = gbm.predict(X_test_csc, pred_contrib=True)
        self.assertTrue(isspmatrix_csc(contribs_csc))
        # validate the values are the same
        np.testing.assert_allclose(contribs_csc.toarray(), contribs_dense)

    @unittest.skipIf(psutil.virtual_memory().available / 1024 / 1024 / 1024 < 3, 'not enough RAM')
    def test_int32_max_sparse_contribs(self):
        params = {
            'objective': 'binary'
        }
        train_features = np.random.rand(100, 1000)
        train_targets = [0] * 50 + [1] * 50
        lgb_train = lgb.Dataset(train_features, train_targets)
        gbm = lgb.train(params, lgb_train, num_boost_round=2)
        csr_input_shape = (3000000, 1000)
        test_features = csr_matrix(csr_input_shape)
        for i in range(0, csr_input_shape[0], csr_input_shape[0] // 6):
            for j in range(0, 1000, 100):
                test_features[i, j] = random.random()
        y_pred_csr = gbm.predict(test_features, pred_contrib=True)
        # Note there is an extra column added to the output for the expected value
        csr_output_shape = (csr_input_shape[0], csr_input_shape[1] + 1)
        self.assertTupleEqual(y_pred_csr.shape, csr_output_shape)
        y_pred_csc = gbm.predict(test_features.tocsc(), pred_contrib=True)
        # Note output CSC shape should be same as CSR output shape
        self.assertTupleEqual(y_pred_csc.shape, csr_output_shape)

    def test_sliced_data(self):
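        # Training on sliced views of labels, dense matrices and CSR matrices must give the same
        # predictions as training on the original contiguous data.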
        def train_and_get_predictions(features, labels):
            dataset = lgb.Dataset(features, label=labels)
            lgb_params = {
                'application': 'binary',
                'verbose': -1,
                'min_data': 5,
            }
            gbm = lgb.train(
                params=lgb_params,
                train_set=dataset,
                num_boost_round=10,
            )
            return gbm.predict(features)

        num_samples = 100
        features = np.random.rand(num_samples, 5)
        positive_samples = int(num_samples * 0.25)
        labels = np.append(np.ones(positive_samples, dtype=np.float32),
                           np.zeros(num_samples - positive_samples, dtype=np.float32))
        # test sliced labels
        origin_pred = train_and_get_predictions(features, labels)
        stacked_labels = np.column_stack((labels, np.ones(num_samples, dtype=np.float32)))
        sliced_labels = stacked_labels[:, 0]
        sliced_pred = train_and_get_predictions(features, sliced_labels)
        np.testing.assert_allclose(origin_pred, sliced_pred)
        # append some columns
        stacked_features = np.column_stack((np.ones(num_samples, dtype=np.float32), features))
        stacked_features = np.column_stack((np.ones(num_samples, dtype=np.float32), stacked_features))
        stacked_features = np.column_stack((stacked_features, np.ones(num_samples, dtype=np.float32)))
        stacked_features = np.column_stack((stacked_features, np.ones(num_samples, dtype=np.float32)))
        # append some rows
        stacked_features = np.concatenate((np.ones(9, dtype=np.float32).reshape((1, 9)), stacked_features), axis=0)
        stacked_features = np.concatenate((np.ones(9, dtype=np.float32).reshape((1, 9)), stacked_features), axis=0)
        stacked_features = np.concatenate((stacked_features, np.ones(9, dtype=np.float32).reshape((1, 9))), axis=0)
        stacked_features = np.concatenate((stacked_features, np.ones(9, dtype=np.float32).reshape((1, 9))), axis=0)
        # test sliced 2d matrix
        sliced_features = stacked_features[2:102, 2:7]
        self.assertTrue(np.all(sliced_features == features))
        sliced_pred = train_and_get_predictions(sliced_features, sliced_labels)
        np.testing.assert_allclose(origin_pred, sliced_pred)
        # test sliced CSR
        stacked_csr = csr_matrix(stacked_features)
        sliced_csr = stacked_csr[2:102, 2:7]
        self.assertTrue(np.all(sliced_csr == features))
        sliced_pred = train_and_get_predictions(sliced_csr, sliced_labels)
        np.testing.assert_allclose(origin_pred, sliced_pred)

    def test_init_with_subset(self):
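        # Continued training via init_model should work with Dataset.subset() views; subsets of a
        # Dataset backed only by a binary file carry no raw data and should raise LightGBMError.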
        data = np.random.random((50, 2))
        y = [1] * 25 + [0] * 25
        lgb_train = lgb.Dataset(data, y, free_raw_data=False)
        subset_index_1 = np.random.choice(np.arange(50), 30, replace=False)
        subset_data_1 = lgb_train.subset(subset_index_1)
        subset_index_2 = np.random.choice(np.arange(50), 20, replace=False)
        subset_data_2 = lgb_train.subset(subset_index_2)
        params = {
            'objective': 'binary',
            'verbose': -1
        }
        init_gbm = lgb.train(params=params,
                             train_set=subset_data_1,
                             num_boost_round=10,
                             keep_training_booster=True)
        gbm = lgb.train(params=params,
                        train_set=subset_data_2,
                        num_boost_round=10,
                        init_model=init_gbm)
        self.assertEqual(lgb_train.get_data().shape[0], 50)
        self.assertEqual(subset_data_1.get_data().shape[0], 30)
        self.assertEqual(subset_data_2.get_data().shape[0], 20)
        lgb_train.save_binary("lgb_train_data.bin")
        lgb_train_from_file = lgb.Dataset('lgb_train_data.bin', free_raw_data=False)
        subset_data_3 = lgb_train_from_file.subset(subset_index_1)
        subset_data_4 = lgb_train_from_file.subset(subset_index_2)
        init_gbm_2 = lgb.train(params=params,
                               train_set=subset_data_3,
                               num_boost_round=10,
                               keep_training_booster=True)
        with np.testing.assert_raises_regex(lgb.basic.LightGBMError, "Unknown format of training data"):
            gbm = lgb.train(params=params,
                            train_set=subset_data_4,
                            num_boost_round=10,
                            init_model=init_gbm_2)
        self.assertEqual(lgb_train_from_file.get_data(), "lgb_train_data.bin")
        self.assertEqual(subset_data_3.get_data(), "lgb_train_data.bin")
        self.assertEqual(subset_data_4.get_data(), "lgb_train_data.bin")

    def generate_trainset_for_monotone_constraints_tests(self, x3_to_category=True):
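        # synthetic regression data: x1 correlates positively with y, x2 and x3 negatively;
        # x3 is optionally bucketed into a categorical feature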
        number_of_dpoints = 3000
        x1_positively_correlated_with_y = np.random.random(size=number_of_dpoints)
        x2_negatively_correlated_with_y = np.random.random(size=number_of_dpoints)
        x3_negatively_correlated_with_y = np.random.random(size=number_of_dpoints)
        x = np.column_stack(
            (x1_positively_correlated_with_y,
             x2_negatively_correlated_with_y,
             categorize(x3_negatively_correlated_with_y) if x3_to_category else x3_negatively_correlated_with_y))

        zs = np.random.normal(loc=0.0, scale=0.01, size=number_of_dpoints)
        scales = 10. * (np.random.random(6) + 0.5)
        y = (scales[0] * x1_positively_correlated_with_y
             + np.sin(scales[1] * np.pi * x1_positively_correlated_with_y)
             - scales[2] * x2_negatively_correlated_with_y
             - np.cos(scales[3] * np.pi * x2_negatively_correlated_with_y)
             - scales[4] * x3_negatively_correlated_with_y
             - np.cos(scales[5] * np.pi * x3_negatively_correlated_with_y)
             + zs)
        categorical_features = []
        if x3_to_category:
            categorical_features = [2]
        trainset = lgb.Dataset(x, label=y, categorical_feature=categorical_features, free_raw_data=False)
        return trainset

    def test_monotone_constraints(self):
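        # with constraints [1, -1, 0] predictions must be non-decreasing in the first feature,
        # non-increasing in the second and unconstrained in the third; both constraint
        # enforcement methods are checked, with and without a categorical x3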
        def is_increasing(y):
            return (np.diff(y) >= 0.0).all()

        def is_decreasing(y):
            return (np.diff(y) <= 0.0).all()

        def is_non_monotone(y):
            return (np.diff(y) < 0.0).any() and (np.diff(y) > 0.0).any()

        def is_correctly_constrained(learner, x3_to_category=True):
            iterations = 10
            n = 1000
            variable_x = np.linspace(0, 1, n).reshape((n, 1))
            fixed_xs_values = np.linspace(0, 1, n)
            for i in range(iterations):
                fixed_x = fixed_xs_values[i] * np.ones((n, 1))
                monotonically_increasing_x = np.column_stack((variable_x, fixed_x, fixed_x))
                monotonically_increasing_y = learner.predict(monotonically_increasing_x)
                monotonically_decreasing_x = np.column_stack((fixed_x, variable_x, fixed_x))
                monotonically_decreasing_y = learner.predict(monotonically_decreasing_x)
                non_monotone_x = np.column_stack((fixed_x,
                                                  fixed_x,
                                                  categorize(variable_x) if x3_to_category else variable_x))
                non_monotone_y = learner.predict(non_monotone_x)
                if not (is_increasing(monotonically_increasing_y)
                        and is_decreasing(monotonically_decreasing_y)
                        and is_non_monotone(non_monotone_y)):
                    return False
            return True

        for test_with_categorical_variable in [True, False]:
            trainset = self.generate_trainset_for_monotone_constraints_tests(test_with_categorical_variable)
            for monotone_constraints_method in ["basic", "intermediate"]:
                params = {
                    'min_data': 20,
                    'num_leaves': 20,
                    'monotone_constraints': [1, -1, 0],
                    "monotone_constraints_method": monotone_constraints_method,
                    "use_missing": False,
                }
                constrained_model = lgb.train(params, trainset)
                self.assertTrue(is_correctly_constrained(constrained_model, test_with_categorical_variable))

    def test_monotone_penalty(self):
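        # a monotone penalty of 2 should keep the first two tree levels free of splits
        # on constrained features while still allowing such splits deeper in the tree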
        def are_first_splits_non_monotone(tree, n, monotone_constraints):
            if n <= 0:
                return True
            if "leaf_value" in tree:
                return True
            if monotone_constraints[tree["split_feature"]] != 0:
                return False
            return (are_first_splits_non_monotone(tree["left_child"], n - 1, monotone_constraints)
                    and are_first_splits_non_monotone(tree["right_child"], n - 1, monotone_constraints))

        def are_there_monotone_splits(tree, monotone_constraints):
            if "leaf_value" in tree:
                return False
            if monotone_constraints[tree["split_feature"]] != 0:
                return True
            return (are_there_monotone_splits(tree["left_child"], monotone_constraints)
                    or are_there_monotone_splits(tree["right_child"], monotone_constraints))

        max_depth = 5
        monotone_constraints = [1, -1, 0]
        penalization_parameter = 2.0
        trainset = self.generate_trainset_for_monotone_constraints_tests(x3_to_category=False)
        for monotone_constraints_method in ["basic", "intermediate"]:
            params = {
                'max_depth': max_depth,
                'monotone_constraints': monotone_constraints,
                'monotone_penalty': penalization_parameter,
                "monotone_constraints_method": monotone_constraints_method,
            }
            constrained_model = lgb.train(params, trainset, 10)
            dumped_model = constrained_model.dump_model()["tree_info"]
            for tree in dumped_model:
                self.assertTrue(are_first_splits_non_monotone(tree["tree_structure"], int(penalization_parameter),
                                                              monotone_constraints))
                self.assertTrue(are_there_monotone_splits(tree["tree_structure"], monotone_constraints))

    # test if a penalty as high as the depth indeed prohibits all monotone splits
    def test_monotone_penalty_max(self):
        max_depth = 5
        monotone_constraints = [1, -1, 0]
        penalization_parameter = max_depth
        trainset_constrained_model = self.generate_trainset_for_monotone_constraints_tests(x3_to_category=False)
        x = trainset_constrained_model.data
        y = trainset_constrained_model.label
        x3_negatively_correlated_with_y = x[:, 2]
        trainset_unconstrained_model = lgb.Dataset(x3_negatively_correlated_with_y.reshape(-1, 1), label=y)
        params_constrained_model = {
            'monotone_constraints': monotone_constraints,
            'monotone_penalty': penalization_parameter,
            "max_depth": max_depth,
            "gpu_use_dp": True,
        }
        params_unconstrained_model = {
            "max_depth": max_depth,
            "gpu_use_dp": True,
        }

        unconstrained_model = lgb.train(params_unconstrained_model, trainset_unconstrained_model, 10)
        unconstrained_model_predictions = unconstrained_model.predict(
            x3_negatively_correlated_with_y.reshape(-1, 1))

        for monotone_constraints_method in ["basic", "intermediate"]:
            params_constrained_model["monotone_constraints_method"] = monotone_constraints_method
            # The penalization is so high that the first 2 features should not be used here
            constrained_model = lgb.train(params_constrained_model, trainset_constrained_model, 10)

            # Check that a very high penalization is the same as not using the features at all
            np.testing.assert_array_equal(constrained_model.predict(x), unconstrained_model_predictions)

    def test_max_bin_by_feature(self):
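        # per-feature bin limits: [100, 2] keeps all 100 predictions distinct, [2, 100] leaves only 3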
        col1 = np.arange(0, 100)[:, np.newaxis]
        col2 = np.zeros((100, 1))
        col2[20:] = 1
        X = np.concatenate([col1, col2], axis=1)
        y = np.arange(0, 100)
        params = {
            'objective': 'regression_l2',
            'verbose': -1,
            'num_leaves': 100,
            'min_data_in_leaf': 1,
            'min_sum_hessian_in_leaf': 0,
            'min_data_in_bin': 1,
            'max_bin_by_feature': [100, 2]
        }
        lgb_data = lgb.Dataset(X, label=y)
        est = lgb.train(params, lgb_data, num_boost_round=1)
        self.assertEqual(len(np.unique(est.predict(X))), 100)
        params['max_bin_by_feature'] = [2, 100]
        lgb_data = lgb.Dataset(X, label=y)
        est = lgb.train(params, lgb_data, num_boost_round=1)
        self.assertEqual(len(np.unique(est.predict(X))), 3)

    def test_small_max_bin(self):
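        # training should still work when max_bin is smaller than the number of distinct values
        # (and when a NaN is present in the second run)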
        np.random.seed(0)
        y = np.random.choice([0, 1], 100)
        x = np.zeros((100, 1))
        x[:30, 0] = -1
        x[30:60, 0] = 1
        x[60:, 0] = 2
        params = {'objective': 'binary',
                  'seed': 0,
                  'min_data_in_leaf': 1,
                  'verbose': -1,
                  'max_bin': 2}
        lgb_x = lgb.Dataset(x, label=y)
        lgb.train(params, lgb_x, num_boost_round=5)
        x[0, 0] = np.nan
        params['max_bin'] = 3
        lgb_x = lgb.Dataset(x, label=y)
        lgb.train(params, lgb_x, num_boost_round=5)
        np.random.seed()  # reset seed

    def test_refit(self):
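        # refit() re-fits leaf values of the existing trees on new data; refitting on the
        # held-out set should lower the logloss measured there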
        X, y = load_breast_cancer(True)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
        params = {
            'objective': 'binary',
            'metric': 'binary_logloss',
            'verbose': -1,
            'min_data': 10
        }
        lgb_train = lgb.Dataset(X_train, y_train)
        gbm = lgb.train(params, lgb_train, num_boost_round=20)
        err_pred = log_loss(y_test, gbm.predict(X_test))
        new_gbm = gbm.refit(X_test, y_test)
        new_err_pred = log_loss(y_test, new_gbm.predict(X_test))
        self.assertGreater(err_pred, new_err_pred)

    def test_mape_rf(self):
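        # with the mape objective the mean prediction should stay close to the label mean
        # instead of collapsing towards zero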
        X, y = load_boston(True)
        params = {
            'boosting_type': 'rf',
            'objective': 'mape',
            'verbose': -1,
            'bagging_freq': 1,
            'bagging_fraction': 0.8,
            'feature_fraction': 0.8,
            'boost_from_average': True
        }
        lgb_train = lgb.Dataset(X, y)
        gbm = lgb.train(params, lgb_train, num_boost_round=20)
        pred = gbm.predict(X)
        pred_mean = pred.mean()
        self.assertGreater(pred_mean, 20)

    def test_mape_dart(self):
        X, y = load_boston(True)
        params = {
            'boosting_type': 'dart',
            'objective': 'mape',
            'verbose': -1,
            'bagging_freq': 1,
            'bagging_fraction': 0.8,
            'feature_fraction': 0.8,
            'boost_from_average': False
        }
        lgb_train = lgb.Dataset(X, y)
        gbm = lgb.train(params, lgb_train, num_boost_round=40)
        pred = gbm.predict(X)
        pred_mean = pred.mean()
        self.assertGreater(pred_mean, 18)

    def check_constant_features(self, y_true, expected_pred, more_params):
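        # helper: with a single constant feature no split is possible, so predictions should
        # equal the objective's average of the labels (mean, base rate or class frequencies)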
        X_train = np.ones((len(y_true), 1))
        y_train = np.array(y_true)
        params = {
            'objective': 'regression',
            'num_class': 1,
            'verbose': -1,
            'min_data': 1,
            'num_leaves': 2,
            'learning_rate': 1,
            'min_data_in_bin': 1,
            'boost_from_average': True
        }
        params.update(more_params)
        lgb_train = lgb.Dataset(X_train, y_train, params=params)
        gbm = lgb.train(params, lgb_train, num_boost_round=2)
        pred = gbm.predict(X_train)
        self.assertTrue(np.allclose(pred, expected_pred))

    def test_constant_features_regression(self):
        params = {
            'objective': 'regression'
        }
        self.check_constant_features([0.0, 10.0, 0.0, 10.0], 5.0, params)
        self.check_constant_features([0.0, 1.0, 2.0, 3.0], 1.5, params)
        self.check_constant_features([-1.0, 1.0, -2.0, 2.0], 0.0, params)

    def test_constant_features_binary(self):
        params = {
            'objective': 'binary'
        }
        self.check_constant_features([0.0, 10.0, 0.0, 10.0], 0.5, params)
        self.check_constant_features([0.0, 1.0, 2.0, 3.0], 0.75, params)

    def test_constant_features_multiclass(self):
        params = {
            'objective': 'multiclass',
            'num_class': 3
        }
        self.check_constant_features([0.0, 1.0, 2.0, 0.0], [0.5, 0.25, 0.25], params)
        self.check_constant_features([0.0, 1.0, 2.0, 1.0], [0.25, 0.5, 0.25], params)

    def test_constant_features_multiclassova(self):
        params = {
            'objective': 'multiclassova',
            'num_class': 3
        }
        self.check_constant_features([0.0, 1.0, 2.0, 0.0], [0.5, 0.25, 0.25], params)
        self.check_constant_features([0.0, 1.0, 2.0, 1.0], [0.25, 0.5, 0.25], params)

    def test_fpreproc(self):
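        # fpreproc is applied to each CV fold and may return modified datasets and params;
        # here it shifts a feature, relabels a few rows and bumps num_class to 4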
        def preprocess_data(dtrain, dtest, params):
            train_data = dtrain.construct().get_data()
            test_data = dtest.construct().get_data()
            train_data[:, 0] += 1
            test_data[:, 0] += 1
            dtrain.label[-5:] = 3
            dtest.label[-5:] = 3
            dtrain = lgb.Dataset(train_data, dtrain.label)
            dtest = lgb.Dataset(test_data, dtest.label, reference=dtrain)
            params['num_class'] = 4
            return dtrain, dtest, params

        X, y = load_iris(True)
        dataset = lgb.Dataset(X, y, free_raw_data=False)
        params = {'objective': 'multiclass', 'num_class': 3, 'verbose': -1}
        results = lgb.cv(params, dataset, num_boost_round=10, fpreproc=preprocess_data)
        self.assertIn('multi_logloss-mean', results)
        self.assertEqual(len(results['multi_logloss-mean']), 10)

    def test_metrics(self):
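        # check how metrics are resolved for lgb.cv and lgb.train across combinations of
        # metrics in params, the metrics argument, fobj and feval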
        X, y = load_digits(2, True)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
        lgb_train = lgb.Dataset(X_train, y_train, silent=True)
        lgb_valid = lgb.Dataset(X_test, y_test, reference=lgb_train, silent=True)

        evals_result = {}
        params_verbose = {'verbose': -1}
        params_obj_verbose = {'objective': 'binary', 'verbose': -1}
        params_obj_metric_log_verbose = {'objective': 'binary', 'metric': 'binary_logloss', 'verbose': -1}
        params_obj_metric_err_verbose = {'objective': 'binary', 'metric': 'binary_error', 'verbose': -1}
        params_obj_metric_inv_verbose = {'objective': 'binary', 'metric': 'invalid_metric', 'verbose': -1}
        params_obj_metric_multi_verbose = {'objective': 'binary',
                                           'metric': ['binary_logloss', 'binary_error'],
                                           'verbose': -1}
        params_obj_metric_none_verbose = {'objective': 'binary', 'metric': 'None', 'verbose': -1}
        params_metric_log_verbose = {'metric': 'binary_logloss', 'verbose': -1}
        params_metric_err_verbose = {'metric': 'binary_error', 'verbose': -1}
        params_metric_inv_verbose = {'metric_types': 'invalid_metric', 'verbose': -1}
        params_metric_multi_verbose = {'metric': ['binary_logloss', 'binary_error'], 'verbose': -1}
        params_metric_none_verbose = {'metric': 'None', 'verbose': -1}

        def get_cv_result(params=params_obj_verbose, **kwargs):
            return lgb.cv(params, lgb_train, num_boost_round=2, verbose_eval=False, **kwargs)

        def train_booster(params=params_obj_verbose, **kwargs):
            lgb.train(params, lgb_train,
                      num_boost_round=2,
                      valid_sets=[lgb_valid],
                      evals_result=evals_result,
                      verbose_eval=False, **kwargs)

        # no fobj, no feval
        # default metric
        res = get_cv_result()
        self.assertEqual(len(res), 2)
        self.assertIn('binary_logloss-mean', res)

        # non-default metric in params
        res = get_cv_result(params=params_obj_metric_err_verbose)
        self.assertEqual(len(res), 2)
        self.assertIn('binary_error-mean', res)

        # default metric in args
        res = get_cv_result(metrics='binary_logloss')
        self.assertEqual(len(res), 2)
        self.assertIn('binary_logloss-mean', res)

        # non-default metric in args
        res = get_cv_result(metrics='binary_error')
        self.assertEqual(len(res), 2)
        self.assertIn('binary_error-mean', res)

        # metric in args overwrites one in params
        res = get_cv_result(params=params_obj_metric_inv_verbose, metrics='binary_error')
        self.assertEqual(len(res), 2)
        self.assertIn('binary_error-mean', res)

        # multiple metrics in params
        res = get_cv_result(params=params_obj_metric_multi_verbose)
        self.assertEqual(len(res), 4)
        self.assertIn('binary_logloss-mean', res)
        self.assertIn('binary_error-mean', res)

        # multiple metrics in args
        res = get_cv_result(metrics=['binary_logloss', 'binary_error'])
        self.assertEqual(len(res), 4)
        self.assertIn('binary_logloss-mean', res)
        self.assertIn('binary_error-mean', res)

        # remove default metric by 'None' in list
        res = get_cv_result(metrics=['None'])
        self.assertEqual(len(res), 0)

        # remove default metric by 'None' aliases
        for na_alias in ('None', 'na', 'null', 'custom'):
            res = get_cv_result(metrics=na_alias)
            self.assertEqual(len(res), 0)

        # fobj, no feval
        # no default metric
        res = get_cv_result(params=params_verbose, fobj=dummy_obj)
        self.assertEqual(len(res), 0)

        # metric in params
        res = get_cv_result(params=params_metric_err_verbose, fobj=dummy_obj)
        self.assertEqual(len(res), 2)
        self.assertIn('binary_error-mean', res)

        # metric in args
        res = get_cv_result(params=params_verbose, fobj=dummy_obj, metrics='binary_error')
        self.assertEqual(len(res), 2)
        self.assertIn('binary_error-mean', res)

        # metric in args overwrites its alias in params
        res = get_cv_result(params=params_metric_inv_verbose, fobj=dummy_obj, metrics='binary_error')
        self.assertEqual(len(res), 2)
        self.assertIn('binary_error-mean', res)

        # multiple metrics in params
        res = get_cv_result(params=params_metric_multi_verbose, fobj=dummy_obj)
        self.assertEqual(len(res), 4)
        self.assertIn('binary_logloss-mean', res)
        self.assertIn('binary_error-mean', res)

        # multiple metrics in args
        res = get_cv_result(params=params_verbose, fobj=dummy_obj,
                            metrics=['binary_logloss', 'binary_error'])
        self.assertEqual(len(res), 4)
        self.assertIn('binary_logloss-mean', res)
        self.assertIn('binary_error-mean', res)

        # no fobj, feval
        # default metric with custom one
        res = get_cv_result(feval=constant_metric)
        self.assertEqual(len(res), 4)
        self.assertIn('binary_logloss-mean', res)
        self.assertIn('error-mean', res)

        # non-default metric in params with custom one
        res = get_cv_result(params=params_obj_metric_err_verbose, feval=constant_metric)
        self.assertEqual(len(res), 4)
        self.assertIn('binary_error-mean', res)
        self.assertIn('error-mean', res)

        # default metric in args with custom one
        res = get_cv_result(metrics='binary_logloss', feval=constant_metric)
        self.assertEqual(len(res), 4)
        self.assertIn('binary_logloss-mean', res)
        self.assertIn('error-mean', res)

        # non-default metric in args with custom one
        res = get_cv_result(metrics='binary_error', feval=constant_metric)
        self.assertEqual(len(res), 4)
        self.assertIn('binary_error-mean', res)
        self.assertIn('error-mean', res)

        # metric in args overwrites one in params, custom one is evaluated too
        res = get_cv_result(params=params_obj_metric_inv_verbose, metrics='binary_error', feval=constant_metric)
        self.assertEqual(len(res), 4)
        self.assertIn('binary_error-mean', res)
        self.assertIn('error-mean', res)

        # multiple metrics in params with custom one
        res = get_cv_result(params=params_obj_metric_multi_verbose, feval=constant_metric)
        self.assertEqual(len(res), 6)
        self.assertIn('binary_logloss-mean', res)
        self.assertIn('binary_error-mean', res)
        self.assertIn('error-mean', res)

        # multiple metrics in args with custom one
        res = get_cv_result(metrics=['binary_logloss', 'binary_error'], feval=constant_metric)
        self.assertEqual(len(res), 6)
        self.assertIn('binary_logloss-mean', res)
        self.assertIn('binary_error-mean', res)
        self.assertIn('error-mean', res)

        # custom metric is evaluated even if 'None' is passed
        res = get_cv_result(metrics=['None'], feval=constant_metric)
        self.assertEqual(len(res), 2)
        self.assertIn('error-mean', res)

        # fobj, feval
        # no default metric, only custom one
        res = get_cv_result(params=params_verbose, fobj=dummy_obj, feval=constant_metric)
        self.assertEqual(len(res), 2)
        self.assertIn('error-mean', res)

        # metric in params with custom one
        res = get_cv_result(params=params_metric_err_verbose, fobj=dummy_obj, feval=constant_metric)
        self.assertEqual(len(res), 4)
        self.assertIn('binary_error-mean', res)
        self.assertIn('error-mean', res)

        # metric in args with custom one
        res = get_cv_result(params=params_verbose, fobj=dummy_obj,
                            feval=constant_metric, metrics='binary_error')
        self.assertEqual(len(res), 4)
        self.assertIn('binary_error-mean', res)
        self.assertIn('error-mean', res)

        # metric in args overwrites one in params, custom one is evaluated too
        res = get_cv_result(params=params_metric_inv_verbose, fobj=dummy_obj,
                            feval=constant_metric, metrics='binary_error')
        self.assertEqual(len(res), 4)
        self.assertIn('binary_error-mean', res)
        self.assertIn('error-mean', res)

        # multiple metrics in params with custom one
        res = get_cv_result(params=params_metric_multi_verbose, fobj=dummy_obj, feval=constant_metric)
        self.assertEqual(len(res), 6)
        self.assertIn('binary_logloss-mean', res)
        self.assertIn('binary_error-mean', res)
        self.assertIn('error-mean', res)

        # multiple metrics in args with custom one
        res = get_cv_result(params=params_verbose, fobj=dummy_obj, feval=constant_metric,
                            metrics=['binary_logloss', 'binary_error'])
        self.assertEqual(len(res), 6)
        self.assertIn('binary_logloss-mean', res)
        self.assertIn('binary_error-mean', res)
        self.assertIn('error-mean', res)

        # custom metric is evaluated even if 'None' is passed
        res = get_cv_result(params=params_metric_none_verbose, fobj=dummy_obj, feval=constant_metric)
        self.assertEqual(len(res), 2)
        self.assertIn('error-mean', res)

        # no fobj, no feval
        # default metric
        train_booster()
        self.assertEqual(len(evals_result['valid_0']), 1)
        self.assertIn('binary_logloss', evals_result['valid_0'])

        # default metric in params
        train_booster(params=params_obj_metric_log_verbose)
        self.assertEqual(len(evals_result['valid_0']), 1)
        self.assertIn('binary_logloss', evals_result['valid_0'])

        # non-default metric in params
        train_booster(params=params_obj_metric_err_verbose)
        self.assertEqual(len(evals_result['valid_0']), 1)
        self.assertIn('binary_error', evals_result['valid_0'])

        # multiple metrics in params
        train_booster(params=params_obj_metric_multi_verbose)
        self.assertEqual(len(evals_result['valid_0']), 2)
        self.assertIn('binary_logloss', evals_result['valid_0'])
        self.assertIn('binary_error', evals_result['valid_0'])

        # remove default metric by 'None' aliases
        for na_alias in ('None', 'na', 'null', 'custom'):
            params = {'objective': 'binary', 'metric': na_alias, 'verbose': -1}
            train_booster(params=params)
            self.assertEqual(len(evals_result), 0)

        # fobj, no feval
        # no default metric
        train_booster(params=params_verbose, fobj=dummy_obj)
        self.assertEqual(len(evals_result), 0)

        # metric in params
        train_booster(params=params_metric_log_verbose, fobj=dummy_obj)
        self.assertEqual(len(evals_result['valid_0']), 1)
        self.assertIn('binary_logloss', evals_result['valid_0'])

        # multiple metrics in params
        train_booster(params=params_metric_multi_verbose, fobj=dummy_obj)
        self.assertEqual(len(evals_result['valid_0']), 2)
        self.assertIn('binary_logloss', evals_result['valid_0'])
        self.assertIn('binary_error', evals_result['valid_0'])

        # no fobj, feval
        # default metric with custom one
        train_booster(feval=constant_metric)
        self.assertEqual(len(evals_result['valid_0']), 2)
        self.assertIn('binary_logloss', evals_result['valid_0'])
        self.assertIn('error', evals_result['valid_0'])

        # default metric in params with custom one
        train_booster(params=params_obj_metric_log_verbose, feval=constant_metric)
        self.assertEqual(len(evals_result['valid_0']), 2)
        self.assertIn('binary_logloss', evals_result['valid_0'])
        self.assertIn('error', evals_result['valid_0'])

        # non-default metric in params with custom one
        train_booster(params=params_obj_metric_err_verbose, feval=constant_metric)
        self.assertEqual(len(evals_result['valid_0']), 2)
        self.assertIn('binary_error', evals_result['valid_0'])
        self.assertIn('error', evals_result['valid_0'])

        # multiple metrics in params with custom one
        train_booster(params=params_obj_metric_multi_verbose, feval=constant_metric)
        self.assertEqual(len(evals_result['valid_0']), 3)
        self.assertIn('binary_logloss', evals_result['valid_0'])
        self.assertIn('binary_error', evals_result['valid_0'])
        self.assertIn('error', evals_result['valid_0'])

        # custom metric is evaluated even if 'None' is passed
        train_booster(params=params_obj_metric_none_verbose, feval=constant_metric)
        self.assertEqual(len(evals_result), 1)
        self.assertIn('error', evals_result['valid_0'])

        # fobj, feval
        # no default metric, only custom one
        train_booster(params=params_verbose, fobj=dummy_obj, feval=constant_metric)
        self.assertEqual(len(evals_result['valid_0']), 1)
        self.assertIn('error', evals_result['valid_0'])

        # metric in params with custom one
        train_booster(params=params_metric_log_verbose, fobj=dummy_obj, feval=constant_metric)
        self.assertEqual(len(evals_result['valid_0']), 2)
        self.assertIn('binary_logloss', evals_result['valid_0'])
        self.assertIn('error', evals_result['valid_0'])

        # multiple metrics in params with custom one
        train_booster(params=params_metric_multi_verbose, fobj=dummy_obj, feval=constant_metric)
        self.assertEqual(len(evals_result['valid_0']), 3)
        self.assertIn('binary_logloss', evals_result['valid_0'])
        self.assertIn('binary_error', evals_result['valid_0'])
        self.assertIn('error', evals_result['valid_0'])

        # custom metric is evaluated even if 'None' is passed
        train_booster(params=params_metric_none_verbose, fobj=dummy_obj, feval=constant_metric)
        self.assertEqual(len(evals_result), 1)
        self.assertIn('error', evals_result['valid_0'])

        X, y = load_digits(3, True)
        lgb_train = lgb.Dataset(X, y, silent=True)

        obj_multi_aliases = ['multiclass', 'softmax', 'multiclassova', 'multiclass_ova', 'ova', 'ovr']
        for obj_multi_alias in obj_multi_aliases:
            params_obj_class_3_verbose = {'objective': obj_multi_alias, 'num_class': 3, 'verbose': -1}
            params_obj_class_1_verbose = {'objective': obj_multi_alias, 'num_class': 1, 'verbose': -1}
            params_obj_verbose = {'objective': obj_multi_alias, 'verbose': -1}
            # multiclass default metric
            res = get_cv_result(params_obj_class_3_verbose)
            self.assertEqual(len(res), 2)
            self.assertIn('multi_logloss-mean', res)
            # multiclass default metric with custom one
            res = get_cv_result(params_obj_class_3_verbose, feval=constant_metric)
            self.assertEqual(len(res), 4)
            self.assertIn('multi_logloss-mean', res)
            self.assertIn('error-mean', res)
            # multiclass metric alias with custom one for custom objective
            res = get_cv_result(params_obj_class_3_verbose, fobj=dummy_obj, feval=constant_metric)
            self.assertEqual(len(res), 2)
            self.assertIn('error-mean', res)
            # no metric for invalid class_num
            res = get_cv_result(params_obj_class_1_verbose, fobj=dummy_obj)
            self.assertEqual(len(res), 0)
            # custom metric for invalid class_num
            res = get_cv_result(params_obj_class_1_verbose, fobj=dummy_obj, feval=constant_metric)
            self.assertEqual(len(res), 2)
            self.assertIn('error-mean', res)
            # multiclass metric alias with custom one with invalid class_num
            self.assertRaises(lgb.basic.LightGBMError, get_cv_result,
                              params_obj_class_1_verbose, metrics=obj_multi_alias,
                              fobj=dummy_obj, feval=constant_metric)
            # multiclass default metric without num_class
            self.assertRaises(lgb.basic.LightGBMError, get_cv_result,
                              params_obj_verbose)
            for metric_multi_alias in obj_multi_aliases + ['multi_logloss']:
                # multiclass metric alias
                res = get_cv_result(params_obj_class_3_verbose, metrics=metric_multi_alias)
                self.assertEqual(len(res), 2)
                self.assertIn('multi_logloss-mean', res)
            # multiclass metric
            res = get_cv_result(params_obj_class_3_verbose, metrics='multi_error')
            self.assertEqual(len(res), 2)
            self.assertIn('multi_error-mean', res)
            # non-valid metric for multiclass objective
            self.assertRaises(lgb.basic.LightGBMError, get_cv_result,
                              params_obj_class_3_verbose, metrics='binary_logloss')
        params_class_3_verbose = {'num_class': 3, 'verbose': -1}
        # non-default num_class for default objective
        self.assertRaises(lgb.basic.LightGBMError, get_cv_result,
                          params_class_3_verbose)
        # no metric with non-default num_class for custom objective
        res = get_cv_result(params_class_3_verbose, fobj=dummy_obj)
        self.assertEqual(len(res), 0)
        for metric_multi_alias in obj_multi_aliases + ['multi_logloss']:
            # multiclass metric alias for custom objective
            res = get_cv_result(params_class_3_verbose, metrics=metric_multi_alias, fobj=dummy_obj)
            self.assertEqual(len(res), 2)
            self.assertIn('multi_logloss-mean', res)
        # multiclass metric for custom objective
        res = get_cv_result(params_class_3_verbose, metrics='multi_error', fobj=dummy_obj)
        self.assertEqual(len(res), 2)
        self.assertIn('multi_error-mean', res)
        # binary metric with non-default num_class for custom objective
        self.assertRaises(lgb.basic.LightGBMError, get_cv_result,
                          params_class_3_verbose, metrics='binary_error', fobj=dummy_obj)

    @unittest.skipIf(psutil.virtual_memory().available / 1024 / 1024 / 1024 < 3, 'not enough RAM')
    def test_model_size(self):
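        # build a model string just over 2GB by repeating one tree and padding, then check
        # it loads back and the first two trees still predict the same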
        X, y = load_boston(True)
        data = lgb.Dataset(X, y)
        bst = lgb.train({'verbose': -1}, data, num_boost_round=2)
        y_pred = bst.predict(X)
        model_str = bst.model_to_string()
        one_tree = model_str[model_str.find('Tree=1'):model_str.find('end of trees')]
        one_tree_size = len(one_tree)
        one_tree = one_tree.replace('Tree=1', 'Tree={}')
        multiplier = 100
        total_trees = multiplier + 2
        try:
            new_model_str = (model_str[:model_str.find('tree_sizes')]
                             + '\n\n'
                             + model_str[model_str.find('Tree=0'):model_str.find('end of trees')]
                             + (one_tree * multiplier).format(*range(2, total_trees))
                             + model_str[model_str.find('end of trees'):]
                             + ' ' * (2**31 - one_tree_size * total_trees))
            self.assertGreater(len(new_model_str), 2**31)
            bst.model_from_string(new_model_str, verbose=False)
            self.assertEqual(bst.num_trees(), total_trees)
            y_pred_new = bst.predict(X, num_iteration=2)
            np.testing.assert_allclose(y_pred, y_pred_new)
        except MemoryError:
            self.skipTest('not enough RAM')

    def test_get_split_value_histogram(self):
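        # split value histograms are checked in both XGBoost-style and numpy-style formats;
        # requesting one for a categorical feature should raise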
        X, y = load_boston(True)
        lgb_train = lgb.Dataset(X, y, categorical_feature=[2])
        gbm = lgb.train({'verbose': -1}, lgb_train, num_boost_round=20)
        # test XGBoost-style return value
        params = {'feature': 0, 'xgboost_style': True}
        self.assertTupleEqual(gbm.get_split_value_histogram(**params).shape, (9, 2))
        self.assertTupleEqual(gbm.get_split_value_histogram(bins=999, **params).shape, (9, 2))
        self.assertTupleEqual(gbm.get_split_value_histogram(bins=-1, **params).shape, (1, 2))
        self.assertTupleEqual(gbm.get_split_value_histogram(bins=0, **params).shape, (1, 2))
        self.assertTupleEqual(gbm.get_split_value_histogram(bins=1, **params).shape, (1, 2))
        self.assertTupleEqual(gbm.get_split_value_histogram(bins=2, **params).shape, (2, 2))
        self.assertTupleEqual(gbm.get_split_value_histogram(bins=6, **params).shape, (5, 2))
        self.assertTupleEqual(gbm.get_split_value_histogram(bins=7, **params).shape, (6, 2))
        if lgb.compat.PANDAS_INSTALLED:
            np.testing.assert_allclose(
                gbm.get_split_value_histogram(0, xgboost_style=True).values,
                gbm.get_split_value_histogram(gbm.feature_name()[0], xgboost_style=True).values
            )
            np.testing.assert_allclose(
                gbm.get_split_value_histogram(X.shape[-1] - 1, xgboost_style=True).values,
                gbm.get_split_value_histogram(gbm.feature_name()[X.shape[-1] - 1], xgboost_style=True).values
            )
        else:
            np.testing.assert_allclose(
                gbm.get_split_value_histogram(0, xgboost_style=True),
                gbm.get_split_value_histogram(gbm.feature_name()[0], xgboost_style=True)
            )
            np.testing.assert_allclose(
                gbm.get_split_value_histogram(X.shape[-1] - 1, xgboost_style=True),
                gbm.get_split_value_histogram(gbm.feature_name()[X.shape[-1] - 1], xgboost_style=True)
            )
        # test numpy-style return value
        hist, bins = gbm.get_split_value_histogram(0)
        self.assertEqual(len(hist), 23)
        self.assertEqual(len(bins), 24)
        hist, bins = gbm.get_split_value_histogram(0, bins=999)
        self.assertEqual(len(hist), 999)
        self.assertEqual(len(bins), 1000)
        self.assertRaises(ValueError, gbm.get_split_value_histogram, 0, bins=-1)
        self.assertRaises(ValueError, gbm.get_split_value_histogram, 0, bins=0)
        hist, bins = gbm.get_split_value_histogram(0, bins=1)
        self.assertEqual(len(hist), 1)
        self.assertEqual(len(bins), 2)
        hist, bins = gbm.get_split_value_histogram(0, bins=2)
        self.assertEqual(len(hist), 2)
        self.assertEqual(len(bins), 3)
        hist, bins = gbm.get_split_value_histogram(0, bins=6)
        self.assertEqual(len(hist), 6)
        self.assertEqual(len(bins), 7)
        hist, bins = gbm.get_split_value_histogram(0, bins=7)
        self.assertEqual(len(hist), 7)
        self.assertEqual(len(bins), 8)
        hist_idx, bins_idx = gbm.get_split_value_histogram(0)
        hist_name, bins_name = gbm.get_split_value_histogram(gbm.feature_name()[0])
        np.testing.assert_array_equal(hist_idx, hist_name)
        np.testing.assert_allclose(bins_idx, bins_name)
        hist_idx, bins_idx = gbm.get_split_value_histogram(X.shape[-1] - 1)
        hist_name, bins_name = gbm.get_split_value_histogram(gbm.feature_name()[X.shape[-1] - 1])
        np.testing.assert_array_equal(hist_idx, hist_name)
        np.testing.assert_allclose(bins_idx, bins_name)
        # test bins string type
        if np.__version__ > '1.11.0':
            hist_vals, bin_edges = gbm.get_split_value_histogram(0, bins='auto')
            hist = gbm.get_split_value_histogram(0, bins='auto', xgboost_style=True)
            if lgb.compat.PANDAS_INSTALLED:
                mask = hist_vals > 0
                np.testing.assert_array_equal(hist_vals[mask], hist['Count'].values)
                np.testing.assert_allclose(bin_edges[1:][mask], hist['SplitValue'].values)
            else:
                mask = hist_vals > 0
                np.testing.assert_array_equal(hist_vals[mask], hist[:, 1])
                np.testing.assert_allclose(bin_edges[1:][mask], hist[:, 0])
        # test histogram is disabled for categorical features
        self.assertRaises(lgb.basic.LightGBMError, gbm.get_split_value_histogram, 2)

    def test_early_stopping_for_only_first_metric(self):
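        # with first_metric_only=True early stopping should track only the first metric listed;
        # the expected best iterations below are fixed values for seed 123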

        def metrics_combination_train_regression(valid_sets, metric_list, assumed_iteration,
                                                 first_metric_only, feval=None):
            params = {
                'objective': 'regression',
                'learning_rate': 1.1,
                'num_leaves': 10,
                'metric': metric_list,
                'verbose': -1,
                'seed': 123
            }
            gbm = lgb.train(dict(params, first_metric_only=first_metric_only), lgb_train,
                            num_boost_round=25, valid_sets=valid_sets, feval=feval,
                            early_stopping_rounds=5, verbose_eval=False)
            self.assertEqual(assumed_iteration, gbm.best_iteration)

        def metrics_combination_cv_regression(metric_list, assumed_iteration,
                                              first_metric_only, eval_train_metric, feval=None):
            params = {
                'objective': 'regression',
                'learning_rate': 0.9,
                'num_leaves': 10,
                'metric': metric_list,
                'verbose': -1,
                'seed': 123,
                'gpu_use_dp': True
            }
            ret = lgb.cv(dict(params, first_metric_only=first_metric_only),
                         train_set=lgb_train, num_boost_round=25,
                         stratified=False, feval=feval,
                         early_stopping_rounds=5, verbose_eval=False,
                         eval_train_metric=eval_train_metric)
            self.assertEqual(assumed_iteration, len(ret[list(ret.keys())[0]]))

        X, y = load_boston(True)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
        X_test1, X_test2, y_test1, y_test2 = train_test_split(X_test, y_test, test_size=0.5, random_state=73)
        lgb_train = lgb.Dataset(X_train, y_train)
        lgb_valid1 = lgb.Dataset(X_test1, y_test1, reference=lgb_train)
        lgb_valid2 = lgb.Dataset(X_test2, y_test2, reference=lgb_train)

        iter_valid1_l1 = 3
        iter_valid1_l2 = 14
        iter_valid2_l1 = 2
        iter_valid2_l2 = 15
        self.assertEqual(len(set([iter_valid1_l1, iter_valid1_l2, iter_valid2_l1, iter_valid2_l2])), 4)
        iter_min_l1 = min([iter_valid1_l1, iter_valid2_l1])
        iter_min_l2 = min([iter_valid1_l2, iter_valid2_l2])
        iter_min_valid1 = min([iter_valid1_l1, iter_valid1_l2])

        iter_cv_l1 = 4
        iter_cv_l2 = 12
        self.assertEqual(len(set([iter_cv_l1, iter_cv_l2])), 2)
        iter_cv_min = min([iter_cv_l1, iter_cv_l2])

        # test for lgb.train
        metrics_combination_train_regression(lgb_valid1, [], iter_valid1_l2, False)
        metrics_combination_train_regression(lgb_valid1, [], iter_valid1_l2, True)
        metrics_combination_train_regression(lgb_valid1, None, iter_valid1_l2, False)
        metrics_combination_train_regression(lgb_valid1, None, iter_valid1_l2, True)
        metrics_combination_train_regression(lgb_valid1, 'l2', iter_valid1_l2, True)
        metrics_combination_train_regression(lgb_valid1, 'l1', iter_valid1_l1, True)
        metrics_combination_train_regression(lgb_valid1, ['l2', 'l1'], iter_valid1_l2, True)
        metrics_combination_train_regression(lgb_valid1, ['l1', 'l2'], iter_valid1_l1, True)
        metrics_combination_train_regression(lgb_valid1, ['l2', 'l1'], iter_min_valid1, False)
        metrics_combination_train_regression(lgb_valid1, ['l1', 'l2'], iter_min_valid1, False)

        # test feval for lgb.train
        metrics_combination_train_regression(lgb_valid1, 'None', 1, False,
                                             feval=lambda preds, train_data: [decreasing_metric(preds, train_data),
                                                                              constant_metric(preds, train_data)])
        metrics_combination_train_regression(lgb_valid1, 'None', 25, True,
                                             feval=lambda preds, train_data: [decreasing_metric(preds, train_data),
                                                                              constant_metric(preds, train_data)])
        metrics_combination_train_regression(lgb_valid1, 'None', 1, True,
                                             feval=lambda preds, train_data: [constant_metric(preds, train_data),
                                                                              decreasing_metric(preds, train_data)])

        # test with two valid data for lgb.train
        metrics_combination_train_regression([lgb_valid1, lgb_valid2], ['l2', 'l1'], iter_min_l2, True)
        metrics_combination_train_regression([lgb_valid2, lgb_valid1], ['l2', 'l1'], iter_min_l2, True)
        metrics_combination_train_regression([lgb_valid1, lgb_valid2], ['l1', 'l2'], iter_min_l1, True)
        metrics_combination_train_regression([lgb_valid2, lgb_valid1], ['l1', 'l2'], iter_min_l1, True)

        # test for lgb.cv
        metrics_combination_cv_regression(None, iter_cv_l2, True, False)
        metrics_combination_cv_regression('l2', iter_cv_l2, True, False)
        metrics_combination_cv_regression('l1', iter_cv_l1, True, False)
        metrics_combination_cv_regression(['l2', 'l1'], iter_cv_l2, True, False)
        metrics_combination_cv_regression(['l1', 'l2'], iter_cv_l1, True, False)
        metrics_combination_cv_regression(['l2', 'l1'], iter_cv_min, False, False)
        metrics_combination_cv_regression(['l1', 'l2'], iter_cv_min, False, False)
        metrics_combination_cv_regression(None, iter_cv_l2, True, True)
        metrics_combination_cv_regression('l2', iter_cv_l2, True, True)
        metrics_combination_cv_regression('l1', iter_cv_l1, True, True)
        metrics_combination_cv_regression(['l2', 'l1'], iter_cv_l2, True, True)
        metrics_combination_cv_regression(['l1', 'l2'], iter_cv_l1, True, True)
        metrics_combination_cv_regression(['l2', 'l1'], iter_cv_min, False, True)
        metrics_combination_cv_regression(['l1', 'l2'], iter_cv_min, False, True)

        # test feval for lgb.cv
        metrics_combination_cv_regression('None', 1, False, False,
                                          feval=lambda preds, train_data: [decreasing_metric(preds, train_data),
                                                                           constant_metric(preds, train_data)])
        metrics_combination_cv_regression('None', 25, True, False,
                                          feval=lambda preds, train_data: [decreasing_metric(preds, train_data),
                                                                           constant_metric(preds, train_data)])
        metrics_combination_cv_regression('None', 1, True, False,
                                          feval=lambda preds, train_data: [constant_metric(preds, train_data),
                                                                           decreasing_metric(preds, train_data)])

    def test_node_level_subcol(self):
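        # feature_fraction_bynode samples columns per node; results should change once the
        # tree-level feature_fraction is changed as well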
        X, y = load_breast_cancer(True)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
        params = {
            'objective': 'binary',
            'metric': 'binary_logloss',
            'feature_fraction_bynode': 0.8,
            'feature_fraction': 1.0,
            'verbose': -1
        }
        lgb_train = lgb.Dataset(X_train, y_train)
        lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
        evals_result = {}
        gbm = lgb.train(params, lgb_train,
                        num_boost_round=25,
                        valid_sets=lgb_eval,
                        verbose_eval=False,
                        evals_result=evals_result)
        ret = log_loss(y_test, gbm.predict(X_test))
        self.assertLess(ret, 0.14)
        self.assertAlmostEqual(evals_result['valid_0']['binary_logloss'][-1], ret, places=5)
        params['feature_fraction'] = 0.5
        gbm2 = lgb.train(params, lgb_train, num_boost_round=25)
        ret2 = log_loss(y_test, gbm2.predict(X_test))
        self.assertNotEqual(ret, ret2)

    def test_forced_bins(self):
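        # bin boundaries for the first feature are forced from a JSON file; predictions should
        # reflect the custom binning and change back once the file is removed from params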
        x = np.zeros((100, 2))
        x[:, 0] = np.arange(0, 1, 0.01)
        x[:, 1] = -np.arange(0, 1, 0.01)
        y = np.arange(0, 1, 0.01)
        forcedbins_filename = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                           '../../examples/regression/forced_bins.json')
        params = {'objective': 'regression_l1',
                  'max_bin': 5,
                  'forcedbins_filename': forcedbins_filename,
                  'num_leaves': 2,
                  'min_data_in_leaf': 1,
                  'verbose': -1}
        lgb_x = lgb.Dataset(x, label=y)
        est = lgb.train(params, lgb_x, num_boost_round=20)
        new_x = np.zeros((3, x.shape[1]))
        new_x[:, 0] = [0.31, 0.37, 0.41]
        new_x[:, 1] = [0, 0, 0]
        predicted = est.predict(new_x)
        self.assertEqual(len(np.unique(predicted)), 3)
        new_x[:, 0] = [0, 0, 0]
        new_x[:, 1] = [-0.9, -0.6, -0.3]
        predicted = est.predict(new_x)
        self.assertEqual(len(np.unique(predicted)), 1)
        params['forcedbins_filename'] = ''
        lgb_x = lgb.Dataset(x, label=y)
        est = lgb.train(params, lgb_x, num_boost_round=20)
        predicted = est.predict(new_x)
        self.assertEqual(len(np.unique(predicted)), 3)
        params['forcedbins_filename'] = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                                     '../../examples/regression/forced_bins2.json')
        params['max_bin'] = 11
        lgb_x = lgb.Dataset(x[:, :1], label=y)
        est = lgb.train(params, lgb_x, num_boost_round=50)
        predicted = est.predict(x[1:, :1])
        _, counts = np.unique(predicted, return_counts=True)
        self.assertGreaterEqual(min(counts), 9)
        self.assertLessEqual(max(counts), 11)

    def test_binning_same_sign(self):
        # test that binning works properly for features with only positive or only negative values
        x = np.zeros((99, 2))
        x[:, 0] = np.arange(0.01, 1, 0.01)
        x[:, 1] = -np.arange(0.01, 1, 0.01)
        y = np.arange(0.01, 1, 0.01)
        params = {'objective': 'regression_l1',
                  'max_bin': 5,
                  'num_leaves': 2,
                  'min_data_in_leaf': 1,
                  'verbose': -1,
                  'seed': 0}
        lgb_x = lgb.Dataset(x, label=y)
        est = lgb.train(params, lgb_x, num_boost_round=20)
        new_x = np.zeros((3, 2))
        new_x[:, 0] = [-1, 0, 1]
        predicted = est.predict(new_x)
        self.assertAlmostEqual(predicted[0], predicted[1])
        self.assertNotAlmostEqual(predicted[1], predicted[2])
        new_x = np.zeros((3, 2))
        new_x[:, 1] = [-1, 0, 1]
        predicted = est.predict(new_x)
        self.assertNotAlmostEqual(predicted[0], predicted[1])
        self.assertAlmostEqual(predicted[1], predicted[2])

    def test_dataset_update_params(self):
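        # most Dataset-construction params cannot change after the Dataset has been built;
        # the few allowed cases are spelled out below, everything else must raise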
        default_params = {"max_bin": 100,
                          "max_bin_by_feature": [20, 10],
                          "bin_construct_sample_cnt": 10000,
                          "min_data_in_bin": 1,
                          "use_missing": False,
                          "zero_as_missing": False,
                          "categorical_feature": [0],
                          "feature_pre_filter": True,
                          "pre_partition": False,
                          "enable_bundle": True,
                          "data_random_seed": 0,
                          "is_enable_sparse": True,
                          "header": True,
                          "two_round": True,
                          "label_column": 0,
                          "weight_column": 0,
                          "group_column": 0,
                          "ignore_column": 0,
                          "min_data_in_leaf": 10,
                          "verbose": -1}
        unchangeable_params = {"max_bin": 150,
                               "max_bin_by_feature": [30, 5],
                               "bin_construct_sample_cnt": 5000,
                               "min_data_in_bin": 2,
                               "use_missing": True,
                               "zero_as_missing": True,
                               "categorical_feature": [0, 1],
                               "feature_pre_filter": False,
                               "pre_partition": True,
                               "enable_bundle": False,
                               "data_random_seed": 1,
                               "is_enable_sparse": False,
                               "header": False,
                               "two_round": False,
                               "label_column": 1,
                               "weight_column": 1,
                               "group_column": 1,
                               "ignore_column": 1,
                               "forcedbins_filename": "/some/path/forcedbins.json",
                               "min_data_in_leaf": 2}
        X = np.random.random((100, 2))
        y = np.random.random(100)

        # decreasing without freeing raw data is allowed
        lgb_data = lgb.Dataset(X, y, params=default_params, free_raw_data=False).construct()
        default_params["min_data_in_leaf"] -= 1
        lgb.train(default_params, lgb_data, num_boost_round=3)

        # decreasing before lazy init is allowed
        lgb_data = lgb.Dataset(X, y, params=default_params)
        default_params["min_data_in_leaf"] -= 1
        lgb.train(default_params, lgb_data, num_boost_round=3)

        # increasing is allowed
        default_params["min_data_in_leaf"] += 2
        lgb.train(default_params, lgb_data, num_boost_round=3)

        # decreasing with disabled filter is allowed
        default_params["feature_pre_filter"] = False
        lgb_data = lgb.Dataset(X, y, params=default_params).construct()
        default_params["min_data_in_leaf"] -= 4
        lgb.train(default_params, lgb_data, num_boost_round=3)

        # decreasing with enabled filter is disallowed;
        # also changes of other params are disallowed
        default_params["feature_pre_filter"] = True
        lgb_data = lgb.Dataset(X, y, params=default_params).construct()
        for key, value in unchangeable_params.items():
            new_params = default_params.copy()
            new_params[key] = value
            err_msg = ("Reducing `min_data_in_leaf` with `feature_pre_filter=true` may cause *"
                       if key == "min_data_in_leaf"
                       else "Cannot change {} *".format(key if key != "forcedbins_filename"
                                                        else "forced bins"))
            with np.testing.assert_raises_regex(lgb.basic.LightGBMError, err_msg):
                lgb.train(new_params, lgb_data, num_boost_round=3)
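
    # Illustrative sketch, not an original test: once a Dataset has been constructed,
    # parameters such as `max_bin` cannot be changed on that object, but building a fresh
    # Dataset from the raw data picks the new parameters up without error. The variable
    # names here are hypothetical; only APIs already used in this file are assumed.
    def _example_recreate_dataset_for_new_params(self):
        X = np.random.random((100, 2))
        y = np.random.random(100)
        lgb.Dataset(X, y, params={"max_bin": 100, "verbose": -1}).construct()
        # a fresh Dataset object is the simple way to train with different binning params
        fresh_data = lgb.Dataset(X, y, params={"max_bin": 150, "verbose": -1})
        lgb.train({"verbose": -1}, fresh_data, num_boost_round=1)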

    def test_dataset_params_with_reference(self):
        default_params = {"max_bin": 100}
        X = np.random.random((100, 2))
        y = np.random.random(100)
        X_val = np.random.random((100, 2))
        y_val = np.random.random(100)
        lgb_train = lgb.Dataset(X, y, params=default_params, free_raw_data=False).construct()
        lgb_val = lgb.Dataset(X_val, y_val, reference=lgb_train, free_raw_data=False).construct()
        self.assertDictEqual(lgb_train.get_params(), default_params)
        self.assertDictEqual(lgb_val.get_params(), default_params)
        lgb.train(default_params, lgb_train, valid_sets=[lgb_val])

    def test_extra_trees(self):
        # check extra trees increases regularization
        X, y = load_boston(True)
        lgb_x = lgb.Dataset(X, label=y)
        params = {'objective': 'regression',
                  'num_leaves': 32,
                  'verbose': -1,
                  'extra_trees': False,
                  'seed': 0}
        est = lgb.train(params, lgb_x, num_boost_round=10)
        predicted = est.predict(X)
        err = mean_squared_error(y, predicted)
        params['extra_trees'] = True
        est = lgb.train(params, lgb_x, num_boost_round=10)
        predicted_new = est.predict(X)
        err_new = mean_squared_error(y, predicted_new)
        self.assertLess(err, err_new)
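
    # Illustrative sketch, not an original test: since `extra_trees` is a regularization
    # device, the practically interesting comparison is held-out error rather than the
    # training error checked above. Split sizes and round counts are arbitrary choices.
    def _example_extra_trees_holdout_error(self):
        X, y = load_boston(True)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
        params = {'objective': 'regression', 'verbose': -1, 'extra_trees': True, 'seed': 0}
        est = lgb.train(params, lgb.Dataset(X_train, label=y_train), num_boost_round=100)
        return mean_squared_error(y_test, est.predict(X_test))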

    def test_path_smoothing(self):
        # check path smoothing increases regularization
        X, y = load_boston(True)
        lgb_x = lgb.Dataset(X, label=y)
        params = {'objective': 'regression',
                  'num_leaves': 32,
                  'verbose': -1,
                  'seed': 0}
        est = lgb.train(params, lgb_x, num_boost_round=10)
        predicted = est.predict(X)
        err = mean_squared_error(y, predicted)
        params['path_smooth'] = 1
        est = lgb.train(params, lgb_x, num_boost_round=10)
        predicted_new = est.predict(X)
        err_new = mean_squared_error(y, predicted_new)
        self.assertLess(err, err_new)
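
    # Illustrative sketch, not an original test: `path_smooth` is a continuous strength
    # knob, so sweeping a few values gives a rough feel for how quickly training error
    # rises with stronger smoothing. The sweep values are arbitrary; no assertion is made
    # because strict monotonicity per run is not guaranteed.
    def _example_path_smooth_sweep(self):
        X, y = load_boston(True)
        errs = []
        for smooth in (0, 1, 10):
            params = {'objective': 'regression', 'num_leaves': 32, 'verbose': -1,
                      'seed': 0, 'path_smooth': smooth}
            est = lgb.train(params, lgb.Dataset(X, label=y), num_boost_round=10)
            errs.append(mean_squared_error(y, est.predict(X)))
        return errs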

    @unittest.skipIf(not lgb.compat.PANDAS_INSTALLED, 'pandas is not installed')
    def test_trees_to_dataframe(self):

        def _imptcs_to_numpy(X, impcts_dict):
            cols = ['Column_' + str(i) for i in range(X.shape[1])]
            return [impcts_dict.get(col, 0.) for col in cols]

        X, y = load_breast_cancer(True)
        data = lgb.Dataset(X, label=y)
        num_trees = 10
        bst = lgb.train({"objective": "binary", "verbose": -1}, data, num_trees)
        tree_df = bst.trees_to_dataframe()
        split_dict = (tree_df[~tree_df['split_gain'].isnull()]
                      .groupby('split_feature')
                      .size()
                      .to_dict())

        gains_dict = (tree_df
                      .groupby('split_feature')['split_gain']
                      .sum()
                      .to_dict())

        tree_split = _imptcs_to_numpy(X, split_dict)
        tree_gains = _imptcs_to_numpy(X, gains_dict)
        mod_split = bst.feature_importance('split')
        mod_gains = bst.feature_importance('gain')
        num_trees_from_df = tree_df['tree_index'].nunique()
        obs_counts_from_df = tree_df.loc[tree_df['node_depth'] == 1, 'count'].values

        np.testing.assert_equal(tree_split, mod_split)
        np.testing.assert_allclose(tree_gains, mod_gains)
        self.assertEqual(num_trees_from_df, num_trees)
        np.testing.assert_equal(obs_counts_from_df, len(y))

        # test edge case with one leaf
        X = np.ones((10, 2))
        y = np.random.rand(10)
        data = lgb.Dataset(X, label=y)
        bst = lgb.train({"objective": "binary", "verbose": -1}, data, num_trees)
        tree_df = bst.trees_to_dataframe()

        self.assertEqual(len(tree_df), 1)
        self.assertEqual(tree_df.loc[0, 'tree_index'], 0)
        self.assertEqual(tree_df.loc[0, 'node_depth'], 1)
        self.assertEqual(tree_df.loc[0, 'node_index'], "0-L0")
        self.assertIsNotNone(tree_df.loc[0, 'value'])
        for col in ('left_child', 'right_child', 'parent_index', 'split_feature',
                    'split_gain', 'threshold', 'decision_type', 'missing_direction',
                    'missing_type', 'weight', 'count'):
            self.assertIsNone(tree_df.loc[0, col])
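
    # Illustrative sketch, not an original test: the per-node dataframe returned by
    # `trees_to_dataframe()` lends itself to quick ad-hoc inspection, e.g. pulling out
    # the leaves of the first tree. The column names used here ('tree_index',
    # 'left_child', 'node_index', 'value') are the ones checked in the test above.
    @staticmethod
    def _example_first_tree_leaves(bst):
        tree_df = bst.trees_to_dataframe()
        first_tree = tree_df[tree_df['tree_index'] == 0]
        leaves = first_tree[first_tree['left_child'].isnull()]
        return leaves[['node_index', 'value']]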

    def test_interaction_constraints(self):
        X, y = load_boston(True)
        num_features = X.shape[1]
        train_data = lgb.Dataset(X, label=y)
        # check that constraint containing all features is equivalent to no constraint
        params = {'verbose': -1,
                  'seed': 0}
        est = lgb.train(params, train_data, num_boost_round=10)
        pred1 = est.predict(X)
        est = lgb.train(dict(params, interaction_constraints=[list(range(num_features))]), train_data,
                        num_boost_round=10)
        pred2 = est.predict(X)
        np.testing.assert_allclose(pred1, pred2)
        # check that constraint partitioning the features reduces train accuracy
        est = lgb.train(dict(params, interaction_constraints=[list(range(num_features // 2)),
                                                              list(range(num_features // 2, num_features))]),
                        train_data, num_boost_round=10)
        pred3 = est.predict(X)
        self.assertLess(mean_squared_error(y, pred1), mean_squared_error(y, pred3))
        # check that constraints consisting of single features reduce accuracy further
        est = lgb.train(dict(params, interaction_constraints=[[i] for i in range(num_features)]), train_data,
                        num_boost_round=10)
        pred4 = est.predict(X)
        self.assertLess(mean_squared_error(y, pred3), mean_squared_error(y, pred4))
        # test that interaction constraints work when not all features are used
        X = np.concatenate([np.zeros((X.shape[0], 1)), X], axis=1)
        num_features = X.shape[1]
        train_data = lgb.Dataset(X, label=y)
        est = lgb.train(dict(params, interaction_constraints=[[0] + list(range(2, num_features)),
                                                              [1] + list(range(2, num_features))]),
                        train_data, num_boost_round=10)