"tests/git@developer.sourcefind.cn:tianlh/lightgbm-dcu.git" did not exist on "b33a12ea3883f306388e69f12ceb421b1ee7ec29"
test_engine.py 19.7 KB
Newer Older
Guolin Ke's avatar
Guolin Ke committed
1
# coding: utf-8
wxchan's avatar
wxchan committed
2
# pylint: skip-file
wxchan's avatar
wxchan committed
3
4
5
6
7
import copy
import math
import os
import random
import unittest

import lightgbm as lgb
import numpy as np
from sklearn.datasets import (load_boston, load_breast_cancer, load_digits,
                              load_iris, load_svmlight_file)
from sklearn.metrics import log_loss, mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split, TimeSeriesSplit

try:
    import pandas as pd
    IS_PANDAS_INSTALLED = True
except ImportError:
    IS_PANDAS_INSTALLED = False

try:
    import cPickle as pickle
except ImportError:
    import pickle


def multi_logloss(y_true, y_pred):
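    # average negative log-likelihood of the true class; should match LightGBM's
    # built-in 'multi_logloss' metric, which the tests below assert against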
    return np.mean([-math.log(y_pred[i][y]) for i, y in enumerate(y_true)])


class TestEngine(unittest.TestCase):

    def test_binary(self):
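        # binary classification on breast_cancer; 'num_iteration' in params should
        # take precedence over num_boost_round=20, hence 50 recorded eval rounds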
        X, y = load_breast_cancer(True)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
        params = {
            'objective': 'binary',
            'metric': 'binary_logloss',
            'verbose': -1,
            'num_iteration': 50  # test num_iteration in dict here
        }
        lgb_train = lgb.Dataset(X_train, y_train)
        lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
        evals_result = {}
        gbm = lgb.train(params, lgb_train,
                        num_boost_round=20,
                        valid_sets=lgb_eval,
                        verbose_eval=False,
                        evals_result=evals_result)
        ret = log_loss(y_test, gbm.predict(X_test))
        self.assertLess(ret, 0.15)
        self.assertEqual(len(evals_result['valid_0']['binary_logloss']), 50)
        self.assertAlmostEqual(evals_result['valid_0']['binary_logloss'][-1], ret, places=5)

    def test_rf(self):
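        # random forest mode: 'rf' boosting relies on bagging, hence
        # bagging_freq/bagging_fraction are set explicitly here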
        X, y = load_breast_cancer(True)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
        params = {
            'boosting_type': 'rf',
            'objective': 'binary',
            'bagging_freq': 1,
            'bagging_fraction': 0.5,
            'feature_fraction': 0.5,
            'num_leaves': 50,
            'metric': 'binary_logloss',
            'verbose': -1
        }
        lgb_train = lgb.Dataset(X_train, y_train)
        lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
        evals_result = {}
        gbm = lgb.train(params, lgb_train,
                        num_boost_round=50,
                        valid_sets=lgb_eval,
                        verbose_eval=False,
                        evals_result=evals_result)
        ret = log_loss(y_test, gbm.predict(X_test))
        self.assertLess(ret, 0.25)
        self.assertAlmostEqual(evals_result['valid_0']['binary_logloss'][-1], ret, places=5)

    def test_regression(self):
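        # regression on boston with the default objective; the final recorded
        # 'l2' eval should match sklearn's mean_squared_error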
        X, y = load_boston(True)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
        params = {
            'metric': 'l2',
            'verbose': -1
        }
        lgb_train = lgb.Dataset(X_train, y_train)
        lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
        evals_result = {}
        gbm = lgb.train(params, lgb_train,
                        num_boost_round=50,
                        valid_sets=lgb_eval,
                        verbose_eval=False,
                        evals_result=evals_result)
        ret = mean_squared_error(y_test, gbm.predict(X_test))
        self.assertLess(ret, 16)
        self.assertAlmostEqual(evals_result['valid_0']['l2'][-1], ret, places=5)

    def test_missing_value_handle(self):
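        # the single feature is either 0 or NaN and fully determines the label,
        # so the model should fit the training data almost perfectly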
        X_train = np.zeros((1000, 1))
        y_train = np.zeros(1000)
        trues = random.sample(range(1000), 200)
        for idx in trues:
            X_train[idx, 0] = np.nan
            y_train[idx] = 1
        lgb_train = lgb.Dataset(X_train, y_train)
        lgb_eval = lgb.Dataset(X_train, y_train)

        params = {
            'metric': 'l2',
            'verbose': -1,
            'boost_from_average': False
        }
        evals_result = {}
        gbm = lgb.train(params, lgb_train,
                        num_boost_round=20,
                        valid_sets=lgb_eval,
                        verbose_eval=True,
                        evals_result=evals_result)
        ret = mean_squared_error(y_train, gbm.predict(X_train))
        self.assertLess(ret, 0.005)
        self.assertAlmostEqual(evals_result['valid_0']['l2'][-1], ret, places=5)

    def test_missing_value_handle_na(self):
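        # one split on one feature with NaN treated as missing: the NaN sample
        # is expected to get the same prediction as x == 0 (the label-1 side)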
        x = [0, 1, 2, 3, 4, 5, 6, 7, np.nan]
        y = [1, 1, 1, 1, 0, 0, 0, 0, 1]

        X_train = np.array(x).reshape(len(x), 1)
        y_train = np.array(y)
        lgb_train = lgb.Dataset(X_train, y_train)
        lgb_eval = lgb.Dataset(X_train, y_train)

        params = {
            'objective': 'binary',
            'metric': 'auc',
            'verbose': -1,
            'boost_from_average': False,
            'min_data': 1,
            'num_leaves': 2,
            'learning_rate': 1,
            'min_data_in_bin': 1,
            'zero_as_missing': False
        }
        evals_result = {}
        gbm = lgb.train(params, lgb_train,
                        num_boost_round=1,
                        valid_sets=lgb_eval,
                        verbose_eval=True,
                        evals_result=evals_result)
        pred = gbm.predict(X_train)
        self.assertAlmostEqual(pred[-1], pred[0], places=5)

    def test_missing_value_handle_zero(self):
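        # with zero_as_missing=True, 0 and NaN are both treated as missing and
        # should be routed to the same side as the other label-0 samples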
        x = [0, 1, 2, 3, 4, 5, 6, 7, np.nan]
        y = [0, 1, 1, 1, 0, 0, 0, 0, 0]

        X_train = np.array(x).reshape(len(x), 1)
        y_train = np.array(y)
        lgb_train = lgb.Dataset(X_train, y_train)
        lgb_eval = lgb.Dataset(X_train, y_train)

        params = {
            'objective': 'binary',
            'metric': 'auc',
            'verbose': -1,
            'boost_from_average': False,
            'min_data': 1,
            'num_leaves': 2,
            'learning_rate': 1,
            'min_data_in_bin': 1,
            'zero_as_missing': True
        }
        evals_result = {}
        gbm = lgb.train(params, lgb_train,
                        num_boost_round=1,
                        valid_sets=lgb_eval,
                        verbose_eval=True,
                        evals_result=evals_result)
        pred = gbm.predict(X_train)
        self.assertAlmostEqual(pred[-1], pred[-2], places=5)
        self.assertAlmostEqual(pred[-1], pred[0], places=5)

    def test_missing_value_handle_none(self):
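        # with use_missing=False there is no special handling: the test expects
        # the NaN sample to be treated like the x == 0 and x == 1 samples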
        x = [0, 1, 2, 3, 4, 5, 6, 7, np.nan]
        y = [0, 1, 1, 1, 0, 0, 0, 0, 0]

        X_train = np.array(x).reshape(len(x), 1)
        y_train = np.array(y)
        lgb_train = lgb.Dataset(X_train, y_train)
        lgb_eval = lgb.Dataset(X_train, y_train)

        params = {
            'objective': 'binary',
            'metric': 'auc',
            'verbose': -1,
            'boost_from_average': False,
            'min_data': 1,
            'num_leaves': 2,
            'learning_rate': 1,
            'min_data_in_bin': 1,
            'use_missing': False
        }
        evals_result = {}
        gbm = lgb.train(params, lgb_train,
                        num_boost_round=1,
                        valid_sets=lgb_eval,
                        verbose_eval=True,
                        evals_result=evals_result)
        pred = gbm.predict(X_train)
        self.assertAlmostEqual(pred[0], pred[1], places=5)
        self.assertAlmostEqual(pred[-1], pred[0], places=5)

    def test_multiclass(self):
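        # 10-class digits classification; the custom multi_logloss helper should
        # agree with the recorded 'multi_logloss' eval to 5 places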
        X, y = load_digits(10, True)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
        params = {
            'objective': 'multiclass',
            'metric': 'multi_logloss',
            'num_class': 10,
            'verbose': -1
        }
        lgb_train = lgb.Dataset(X_train, y_train, params=params)
        lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, params=params)
        evals_result = {}
        gbm = lgb.train(params, lgb_train,
                        num_boost_round=50,
                        valid_sets=lgb_eval,
                        verbose_eval=False,
                        evals_result=evals_result)
        ret = multi_logloss(y_test, gbm.predict(X_test))
        self.assertLess(ret, 0.2)
        self.assertAlmostEqual(evals_result['valid_0']['multi_logloss'][-1], ret, places=5)

    def test_multiclass_prediction_early_stopping(self):
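        # prediction-time early stopping: a small pred_early_stop_margin may stop
        # too eagerly (higher loss); a large margin should match the full prediction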
        X, y = load_digits(10, True)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
        params = {
            'objective': 'multiclass',
            'metric': 'multi_logloss',
            'num_class': 10,
            'verbose': -1
        }
        lgb_train = lgb.Dataset(X_train, y_train, params=params)
        lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, params=params)
        evals_result = {}
        gbm = lgb.train(params, lgb_train,
                        num_boost_round=50,
                        valid_sets=lgb_eval,
                        verbose_eval=False,
                        evals_result=evals_result)

        pred_parameter = {"pred_early_stop": True, "pred_early_stop_freq": 5, "pred_early_stop_margin": 1.5}
        ret = multi_logloss(y_test, gbm.predict(X_test, pred_parameter=pred_parameter))
        self.assertLess(ret, 0.8)
        self.assertGreater(ret, 0.5)  # loss will be higher than when evaluating the full model

        pred_parameter = {"pred_early_stop": True, "pred_early_stop_freq": 5, "pred_early_stop_margin": 5.5}
        ret = multi_logloss(y_test, gbm.predict(X_test, pred_parameter=pred_parameter))
        self.assertLess(ret, 0.2)

    def test_early_stopping(self):
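        # best_iteration stays 0 when training ends before early stopping can
        # trigger; with the default 100 rounds it is expected to be set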
        X, y = load_breast_cancer(True)
        params = {
            'objective': 'binary',
            'metric': 'binary_logloss',
            'verbose': -1
        }
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
        lgb_train = lgb.Dataset(X_train, y_train)
        lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
        valid_set_name = 'valid_set'
        # no early stopping
        gbm = lgb.train(params, lgb_train,
                        num_boost_round=10,
                        valid_sets=lgb_eval,
                        valid_names=valid_set_name,
                        verbose_eval=False,
                        early_stopping_rounds=5)
        self.assertEqual(gbm.best_iteration, 0)
        self.assertIn(valid_set_name, gbm.best_score)
        self.assertIn('binary_logloss', gbm.best_score[valid_set_name])
        # early stopping occurs
        gbm = lgb.train(params, lgb_train,
                        valid_sets=lgb_eval,
                        valid_names=valid_set_name,
                        verbose_eval=False,
                        early_stopping_rounds=5)
        self.assertLessEqual(gbm.best_iteration, 100)
        self.assertIn(valid_set_name, gbm.best_score)
        self.assertIn('binary_logloss', gbm.best_score[valid_set_name])

    def test_continue_train(self):
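        # continued training from a saved model file, plus a custom feval that
        # should track the built-in 'l1' metric exactly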
        X, y = load_boston(True)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
        params = {
            'objective': 'regression',
            'metric': 'l1',
            'verbose': -1
        }
        lgb_train = lgb.Dataset(X_train, y_train, free_raw_data=False)
        lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, free_raw_data=False)
        init_gbm = lgb.train(params, lgb_train, num_boost_round=20)
        model_name = 'model.txt'
        init_gbm.save_model(model_name)
        evals_result = {}
        gbm = lgb.train(params, lgb_train,
                        num_boost_round=30,
                        valid_sets=lgb_eval,
                        verbose_eval=False,
                        # test custom eval metrics
                        feval=(lambda p, d: ('mae', mean_absolute_error(p, d.get_label()), False)),
                        evals_result=evals_result,
                        init_model=model_name)
        ret = mean_absolute_error(y_test, gbm.predict(X_test))
        self.assertLess(ret, 3.5)
        self.assertAlmostEqual(evals_result['valid_0']['l1'][-1], ret, places=5)
        for l1, mae in zip(evals_result['valid_0']['l1'], evals_result['valid_0']['mae']):
            self.assertAlmostEqual(l1, mae, places=5)
        os.remove(model_name)

    def test_continue_train_multiclass(self):
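        # continued training for multiclass, resuming from an in-memory Booster
        # rather than a saved model file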
        X, y = load_iris(True)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
        params = {
            'objective': 'multiclass',
            'metric': 'multi_logloss',
            'num_class': 3,
            'verbose': -1
        }
        lgb_train = lgb.Dataset(X_train, y_train, params=params, free_raw_data=False)
        lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, params=params, free_raw_data=False)
        init_gbm = lgb.train(params, lgb_train, num_boost_round=20)
        evals_result = {}
        gbm = lgb.train(params, lgb_train,
                        num_boost_round=30,
                        valid_sets=lgb_eval,
                        verbose_eval=False,
                        evals_result=evals_result,
                        init_model=init_gbm)
        ret = multi_logloss(y_test, gbm.predict(X_test))
        self.assertLess(ret, 1.5)
        self.assertAlmostEqual(evals_result['valid_0']['multi_logloss'][-1], ret, places=5)

    def test_cv(self):
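        # smoke tests for lgb.cv: metric override, shuffling, callbacks,
        # user-supplied folds (TimeSeriesSplit) and grouped lambdarank data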
        X, y = load_boston(True)
        X_train, _, y_train, _ = train_test_split(X, y, test_size=0.1, random_state=42)
        params = {'verbose': -1}
        lgb_train = lgb.Dataset(X_train, y_train)
        # shuffle = False, override metric in params
        params_with_metric = {'metric': 'l2', 'verbose': -1}
        lgb.cv(params_with_metric, lgb_train, num_boost_round=10, nfold=3, shuffle=False,
               metrics='l1', verbose_eval=False)
        # shuffle = True, callbacks
        lgb.cv(params, lgb_train, num_boost_round=10, nfold=3, shuffle=True,
               metrics='l1', verbose_eval=False,
               callbacks=[lgb.reset_parameter(learning_rate=lambda i: 0.1 - 0.001 * i)])
        # self-defined folds
        tss = TimeSeriesSplit(3)
        folds = tss.split(X_train)
        lgb.cv(params_with_metric, lgb_train, num_boost_round=10, folds=folds, verbose_eval=False)
        # lambdarank
        X_train, y_train = load_svmlight_file(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                                           '../../examples/lambdarank/rank.train'))
        q_train = np.loadtxt(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                          '../../examples/lambdarank/rank.train.query'))
        params_lambdarank = {'objective': 'lambdarank', 'verbose': -1}
        lgb_train = lgb.Dataset(X_train, y_train, group=q_train)
        lgb.cv(params_lambdarank, lgb_train, num_boost_round=10, nfold=3, metrics='l2', verbose_eval=False)

    def test_feature_name(self):
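        # custom feature names round-trip through the Booster; names containing
        # spaces are expected to come back with spaces replaced by underscores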
        X, y = load_boston(True)
        X_train, _, y_train, _ = train_test_split(X, y, test_size=0.1, random_state=42)
        params = {'verbose': -1}
        lgb_train = lgb.Dataset(X_train, y_train)
        feature_names = ['f_' + str(i) for i in range(13)]
        gbm = lgb.train(params, lgb_train, num_boost_round=5, feature_name=feature_names)
        self.assertListEqual(feature_names, gbm.feature_name())
        # test feature_names with whitespaces
        feature_names_with_space = ['f ' + str(i) for i in range(13)]
        gbm = lgb.train(params, lgb_train, num_boost_round=5, feature_name=feature_names_with_space)
        self.assertListEqual(feature_names, gbm.feature_name())

    def test_save_load_copy_pickle(self):
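        # models restored via file, Booster(model_file=...), copy, deepcopy and
        # pickle should all continue training to the same result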
        def test_template(init_model=None, return_model=False):
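            # shared harness: train from an optional init_model and return either
            # the Booster itself or its test-set MSE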
            X, y = load_boston(True)
            params = {
                'objective': 'regression',
                'metric': 'l2',
                'verbose': -1
            }
            X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
            lgb_train = lgb.Dataset(X_train, y_train)
            gbm_template = lgb.train(params, lgb_train, num_boost_round=10, init_model=init_model)
            return gbm_template if return_model else mean_squared_error(y_test, gbm_template.predict(X_test))
        gbm = test_template(return_model=True)
        ret_origin = test_template(init_model=gbm)
        other_ret = []
        gbm.save_model('lgb.model')
        other_ret.append(test_template(init_model='lgb.model'))
        gbm_load = lgb.Booster(model_file='lgb.model')
        other_ret.append(test_template(init_model=gbm_load))
        other_ret.append(test_template(init_model=copy.copy(gbm)))
        other_ret.append(test_template(init_model=copy.deepcopy(gbm)))
        with open('lgb.pkl', 'wb') as f:
            pickle.dump(gbm, f)
        with open('lgb.pkl', 'rb') as f:
            gbm_pickle = pickle.load(f)
        other_ret.append(test_template(init_model=gbm_pickle))
        gbm_pickles = pickle.loads(pickle.dumps(gbm))
        other_ret.append(test_template(init_model=gbm_pickles))
        for ret in other_ret:
            self.assertAlmostEqual(ret_origin, ret, places=5)

    @unittest.skipIf(not IS_PANDAS_INSTALLED, 'pandas not installed')
    def test_pandas_categorical(self):
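        # pandas categorical columns should be picked up automatically and give
        # the same predictions as passing categorical_feature explicitly,
        # including after a save/load round-trip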
        X = pd.DataFrame({"A": np.random.permutation(['a', 'b', 'c', 'd'] * 75),  # str
                          "B": np.random.permutation([1, 2, 3] * 100),  # int
                          "C": np.random.permutation([0.1, 0.2, -0.1, -0.1, 0.2] * 60),  # float
                          "D": np.random.permutation([True, False] * 150)})  # bool
        y = np.random.permutation([0, 1] * 150)
        X_test = pd.DataFrame({"A": np.random.permutation(['a', 'b', 'e'] * 20),
                               "B": np.random.permutation([1, 3] * 30),
                               "C": np.random.permutation([0.1, -0.1, 0.2, 0.2] * 15),
                               "D": np.random.permutation([True, False] * 30)})
        for col in ["A", "B", "C", "D"]:
            X[col] = X[col].astype('category')
            X_test[col] = X_test[col].astype('category')
        params = {
            'objective': 'binary',
            'metric': 'binary_logloss',
            'verbose': -1
        }
        lgb_train = lgb.Dataset(X, y)
        gbm0 = lgb.train(params, lgb_train, num_boost_round=10, verbose_eval=False)
        pred0 = list(gbm0.predict(X_test))
        lgb_train = lgb.Dataset(X, y)
        gbm1 = lgb.train(params, lgb_train, num_boost_round=10, verbose_eval=False,
                         categorical_feature=[0])
        pred1 = list(gbm1.predict(X_test))
        lgb_train = lgb.Dataset(X, y)
        gbm2 = lgb.train(params, lgb_train, num_boost_round=10, verbose_eval=False,
                         categorical_feature=['A'])
        pred2 = list(gbm2.predict(X_test))
        lgb_train = lgb.Dataset(X, y)
        gbm3 = lgb.train(params, lgb_train, num_boost_round=10, verbose_eval=False,
                         categorical_feature=['A', 'B', 'C', 'D'])
        pred3 = list(gbm3.predict(X_test))
        lgb_train = lgb.Dataset(X, y)
        gbm3.save_model('categorical.model')
        gbm4 = lgb.Booster(model_file='categorical.model')
        pred4 = list(gbm4.predict(X_test))
        np.testing.assert_almost_equal(pred0, pred1)
        np.testing.assert_almost_equal(pred0, pred2)
        np.testing.assert_almost_equal(pred0, pred3)
        np.testing.assert_almost_equal(pred0, pred4)

    def test_reference_chain(self):
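        # subsets of subsets keep a reference chain back to the original Dataset;
        # training with them should not raise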
        X = np.random.normal(size=(100, 2))
        y = np.random.normal(size=100)
        tmp_dat = lgb.Dataset(X, y)
        # take subsets and train
        tmp_dat_train = tmp_dat.subset(np.arange(80))
        tmp_dat_val = tmp_dat.subset(np.arange(80, 100)).subset(np.arange(18))
        params = {'objective': 'regression_l2', 'metric': 'rmse'}
        gbm = lgb.train(params, tmp_dat_train, num_boost_round=20, valid_sets=[tmp_dat_train, tmp_dat_val])