# coding: utf-8
import os
import tempfile
import unittest

import lightgbm as lgb
import numpy as np

from scipy import sparse
from sklearn.datasets import dump_svmlight_file, load_svmlight_file
from sklearn.model_selection import train_test_split

from .utils import load_breast_cancer


class TestBasic(unittest.TestCase):
wxchan's avatar
wxchan committed
17

wxchan's avatar
wxchan committed
18
    def test(self):
19
        X_train, X_test, y_train, y_test = train_test_split(*load_breast_cancer(return_X_y=True),
20
                                                            test_size=0.1, random_state=2)
21
        train_data = lgb.Dataset(X_train, label=y_train)
wxchan's avatar
wxchan committed
22
        valid_data = train_data.create_valid(X_test, label=y_test)
wxchan's avatar
wxchan committed
23

wxchan's avatar
wxchan committed
24
        params = {
wxchan's avatar
wxchan committed
25
26
            "objective": "binary",
            "metric": "auc",
Guolin Ke's avatar
Guolin Ke committed
27
            "min_data": 10,
wxchan's avatar
wxchan committed
28
            "num_leaves": 15,
29
            "verbose": -1,
30
            "num_threads": 1,
31
32
            "max_bin": 255,
            "gpu_use_dp": True
wxchan's avatar
wxchan committed
33
34
35
        }
        bst = lgb.Booster(params, train_data)
        bst.add_valid(valid_data, "valid_1")
wxchan's avatar
wxchan committed
36

37
        for i in range(20):
wxchan's avatar
wxchan committed
38
39
40
            bst.update()
            if i % 10 == 0:
                print(bst.eval_train(), bst.eval_valid())
41

42
43
        self.assertEqual(bst.current_iteration(), 20)
        self.assertEqual(bst.num_trees(), 20)
44
        self.assertEqual(bst.num_model_per_iteration(), 1)
45
46
        self.assertAlmostEqual(bst.lower_bound(), -2.9040190126976606)
        self.assertAlmostEqual(bst.upper_bound(), 3.3182142872462883)
47

wxchan's avatar
wxchan committed
48
49
50
        bst.save_model("model.txt")
        pred_from_matr = bst.predict(X_test)
        with tempfile.NamedTemporaryFile() as f:
51
52
            tname = f.name
        with open(tname, "w+b") as f:
Guolin Ke's avatar
Guolin Ke committed
53
            dump_svmlight_file(X_test, y_test, f)
54
55
        pred_from_file = bst.predict(tname)
        os.remove(tname)
56
        np.testing.assert_allclose(pred_from_matr, pred_from_file)
cbecker's avatar
cbecker committed
57

wxchan's avatar
wxchan committed
58
        # check saved model persistence
59
        bst = lgb.Booster(params, model_file="model.txt")
60
        os.remove("model.txt")
61
        pred_from_model_file = bst.predict(X_test)
62
63
        # we need to check the consistency of model file here, so test for exact equal
        np.testing.assert_array_equal(pred_from_matr, pred_from_model_file)
cbecker's avatar
cbecker committed
64
65

        # check early stopping is working. Make it stop very early, so the scores should be very close to zero
66
        pred_parameter = {"pred_early_stop": True, "pred_early_stop_freq": 5, "pred_early_stop_margin": 1.5}
67
        pred_early_stopping = bst.predict(X_test, **pred_parameter)
68
69
        # scores likely to be different, but prediction should still be the same
        np.testing.assert_array_equal(np.sign(pred_from_matr), np.sign(pred_early_stopping))
70

71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
        # test that shape is checked during prediction
        bad_X_test = X_test[:, 1:]
        bad_shape_error_msg = "The number of features in data*"
        np.testing.assert_raises_regex(lgb.basic.LightGBMError, bad_shape_error_msg,
                                       bst.predict, bad_X_test)
        np.testing.assert_raises_regex(lgb.basic.LightGBMError, bad_shape_error_msg,
                                       bst.predict, sparse.csr_matrix(bad_X_test))
        np.testing.assert_raises_regex(lgb.basic.LightGBMError, bad_shape_error_msg,
                                       bst.predict, sparse.csc_matrix(bad_X_test))
        with open(tname, "w+b") as f:
            dump_svmlight_file(bad_X_test, y_test, f)
        np.testing.assert_raises_regex(lgb.basic.LightGBMError, bad_shape_error_msg,
                                       bst.predict, tname)
        with open(tname, "w+b") as f:
            dump_svmlight_file(X_test, y_test, f, zero_based=False)
        np.testing.assert_raises_regex(lgb.basic.LightGBMError, bad_shape_error_msg,
                                       bst.predict, tname)
        os.remove(tname)

90
    def test_chunked_dataset(self):
91
        X_train, X_test, y_train, y_test = train_test_split(*load_breast_cancer(return_X_y=True), test_size=0.1, random_state=2)
92
93
94
95
96
97
98
99
100

        chunk_size = X_train.shape[0] // 10 + 1
        X_train = [X_train[i * chunk_size:(i + 1) * chunk_size, :] for i in range(X_train.shape[0] // chunk_size + 1)]
        X_test = [X_test[i * chunk_size:(i + 1) * chunk_size, :] for i in range(X_test.shape[0] // chunk_size + 1)]

        train_data = lgb.Dataset(X_train, label=y_train, params={"bin_construct_sample_cnt": 100})
        valid_data = train_data.create_valid(X_test, label=y_test, params={"bin_construct_sample_cnt": 100})
        train_data.construct()
        valid_data.construct()
101

102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
    def test_chunked_dataset_linear(self):
        """Chunked row input must also construct when linear trees are enabled."""
        X_train, X_test, y_train, y_test = train_test_split(*load_breast_cancer(return_X_y=True), test_size=0.1,
                                                            random_state=2)
        chunk_size = X_train.shape[0] // 10 + 1
        # Build the chunk lists with explicit loops rather than comprehensions.
        train_chunks = []
        for idx in range(X_train.shape[0] // chunk_size + 1):
            train_chunks.append(X_train[idx * chunk_size:(idx + 1) * chunk_size, :])
        test_chunks = []
        for idx in range(X_test.shape[0] // chunk_size + 1):
            test_chunks.append(X_test[idx * chunk_size:(idx + 1) * chunk_size, :])
        params = {"bin_construct_sample_cnt": 100, 'linear_tree': True}
        train_data = lgb.Dataset(train_chunks, label=y_train, params=params)
        valid_data = train_data.create_valid(test_chunks, label=y_test, params=params)
        train_data.construct()
        valid_data.construct()
    def test_save_and_load_linear(self):
        """Linear-tree models must survive a Dataset binary round-trip and a model-file round-trip.

        Fix: the original left ``temp_dataset.bin`` and ``temp_model.txt`` behind
        in the working directory; they are now removed at the end.
        """
        X_train, X_test, y_train, y_test = train_test_split(*load_breast_cancer(return_X_y=True), test_size=0.1,
                                                            random_state=2)
        # Prepend a synthetic column (half zeros, half ones) used as a categorical feature.
        X_train = np.concatenate([np.ones((X_train.shape[0], 1)), X_train], 1)
        X_train[:X_train.shape[0] // 2, 0] = 0
        y_train[:X_train.shape[0] // 2] = 1
        params = {'linear_tree': True}
        train_data = lgb.Dataset(X_train, label=y_train, params=params)
        est = lgb.train(params, train_data, num_boost_round=10, categorical_feature=[0])
        pred1 = est.predict(X_train)
        # Round-trip the Dataset through its binary serialization.
        train_data.save_binary('temp_dataset.bin')
        train_data_2 = lgb.Dataset('temp_dataset.bin')
        est = lgb.train(params, train_data_2, num_boost_round=10)
        pred2 = est.predict(X_train)
        np.testing.assert_allclose(pred1, pred2)
        # Round-trip the Booster through its text model file.
        est.save_model('temp_model.txt')
        est2 = lgb.Booster(model_file='temp_model.txt')
        pred3 = est2.predict(X_train)
        np.testing.assert_allclose(pred2, pred3)
        # Clean up artifacts so repeated runs start from a clean working directory.
        os.remove('temp_dataset.bin')
        os.remove('temp_model.txt')
134
    def test_subset_group(self):
135
136
137
138
        X_train, y_train = load_svmlight_file(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                                           '../../examples/lambdarank/rank.train'))
        q_train = np.loadtxt(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                          '../../examples/lambdarank/rank.train.query'))
139
140
        lgb_train = lgb.Dataset(X_train, y_train, group=q_train)
        self.assertEqual(len(lgb_train.get_group()), 201)
141
        subset = lgb_train.subset(list(range(10))).construct()
142
143
144
145
        subset_group = subset.get_group()
        self.assertEqual(len(subset_group), 2)
        self.assertEqual(subset_group[0], 1)
        self.assertEqual(subset_group[1], 9)
146
147

    def test_add_features_throws_if_num_data_unequal(self):
        """add_features_from must reject datasets whose row counts differ."""
        wide = lgb.Dataset(np.random.random((100, 1))).construct()
        short = lgb.Dataset(np.random.random((10, 1))).construct()
        with self.assertRaises(lgb.basic.LightGBMError):
            wide.add_features_from(short)
    def test_add_features_throws_if_datasets_unconstructed(self):
        """add_features_from must raise ValueError unless both datasets are constructed."""
        X1 = np.random.random((100, 1))
        X2 = np.random.random((100, 1))
        # neither side constructed
        with self.assertRaises(ValueError):
            lgb.Dataset(X1).add_features_from(lgb.Dataset(X2))
        # only the receiver constructed
        with self.assertRaises(ValueError):
            lgb.Dataset(X1).construct().add_features_from(lgb.Dataset(X2))
        # only the argument constructed
        with self.assertRaises(ValueError):
            lgb.Dataset(X1).add_features_from(lgb.Dataset(X2).construct())
    def test_add_features_equal_data_on_alternating_used_unused(self):
        """Merging a column split of X must dump the same text as the unsplit Dataset.

        Fix: temp-file names are now obtained with ``tempfile.mkstemp`` — reusing
        the name of a closed ``NamedTemporaryFile`` is racy and non-portable.
        """
        self.maxDiff = None
        X = np.random.random((100, 5))
        X[:, [1, 3]] = 0  # constant columns, so used and unused features alternate
        names = ['col_%d' % i for i in range(5)]
        for j in range(1, 5):
            # Split the columns at j, build two datasets, then merge them back.
            d1 = lgb.Dataset(X[:, :j], feature_name=names[:j]).construct()
            d2 = lgb.Dataset(X[:, j:], feature_name=names[j:]).construct()
            d1.add_features_from(d2)
            fd, d1name = tempfile.mkstemp()
            os.close(fd)
            d1._dump_text(d1name)
            # Reference dump from the unsplit data.
            d = lgb.Dataset(X, feature_name=names).construct()
            fd, dname = tempfile.mkstemp()
            os.close(fd)
            d._dump_text(dname)
            with open(d1name, 'rt') as d1f:
                d1txt = d1f.read()
            with open(dname, 'rt') as df:
                dtxt = df.read()
            os.remove(dname)
            os.remove(d1name)
            self.assertEqual(dtxt, d1txt)
    def test_add_features_same_booster_behaviour(self):
        """Training on a merged column split must give the same model as unsplit data.

        Fixes: the saved model files were never deleted (left temp files behind),
        and their names came from the racy closed-``NamedTemporaryFile`` pattern;
        both are replaced with ``mkstemp`` plus explicit removal.
        """
        self.maxDiff = None
        X = np.random.random((100, 5))
        X[:, [1, 3]] = 0  # constant columns, so used and unused features alternate
        names = ['col_%d' % i for i in range(5)]
        for j in range(1, 5):
            d1 = lgb.Dataset(X[:, :j], feature_name=names[:j]).construct()
            d2 = lgb.Dataset(X[:, j:], feature_name=names[j:]).construct()
            d1.add_features_from(d2)
            d = lgb.Dataset(X, feature_name=names).construct()
            y = np.random.random(100)
            d1.set_label(y)
            d.set_label(y)
            b1 = lgb.Booster(train_set=d1)
            b = lgb.Booster(train_set=d)
            for k in range(10):
                b.update()
                b1.update()
            fd, dname = tempfile.mkstemp()
            os.close(fd)
            fd, d1name = tempfile.mkstemp()
            os.close(fd)
            b1.save_model(d1name)
            b.save_model(dname)
            with open(dname, 'rt') as df:
                dtxt = df.read()
            with open(d1name, 'rt') as d1f:
                d1txt = d1f.read()
            # remove the model files (previously leaked) before comparing
            os.remove(dname)
            os.remove(d1name)
            self.assertEqual(dtxt, d1txt)
Guolin Ke's avatar
Guolin Ke committed
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
    @unittest.skipIf(not lgb.compat.PANDAS_INSTALLED, 'pandas is not installed')
    def test_add_features_from_different_sources(self):
        import pandas as pd
        n_row = 100
        n_col = 5
        X = np.random.random((n_row, n_col))
        xxs = [X, sparse.csr_matrix(X), pd.DataFrame(X)]
        names = ['col_%d' % i for i in range(n_col)]
        for x_1 in xxs:
            # test that method works even with free_raw_data=True
            d1 = lgb.Dataset(x_1, feature_name=names, free_raw_data=True).construct()
            d2 = lgb.Dataset(x_1, feature_name=names, free_raw_data=True).construct()
            d1.add_features_from(d2)
            self.assertIsNone(d1.data)

            # test that method works but sets raw data to None in case of immergeable data types
            d1 = lgb.Dataset(x_1, feature_name=names, free_raw_data=False).construct()
            d2 = lgb.Dataset([X[:n_row // 2, :], X[n_row // 2:, :]],
                             feature_name=names, free_raw_data=False).construct()
            d1.add_features_from(d2)
            self.assertIsNone(d1.data)

            # test that method works for different data types
            d1 = lgb.Dataset(x_1, feature_name=names, free_raw_data=False).construct()
            res_feature_names = [name for name in names]
            for idx, x_2 in enumerate(xxs, 2):
                original_type = type(d1.get_data())
                d2 = lgb.Dataset(x_2, feature_name=names, free_raw_data=False).construct()
                d1.add_features_from(d2)
                self.assertIsInstance(d1.get_data(), original_type)
                self.assertTupleEqual(d1.get_data().shape, (n_row, n_col * idx))
                res_feature_names += ['D{}_{}'.format(idx, name) for name in names]
                self.assertListEqual(d1.feature_name, res_feature_names)

259
    def test_cegb_affects_behavior(self):
260
        X = np.random.random((100, 5))
261
        X[:, [1, 3]] = 0
262
        y = np.random.random(100)
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
        names = ['col_%d' % i for i in range(5)]
        ds = lgb.Dataset(X, feature_name=names).construct()
        ds.set_label(y)
        base = lgb.Booster(train_set=ds)
        for k in range(10):
            base.update()
        with tempfile.NamedTemporaryFile() as f:
            basename = f.name
        base.save_model(basename)
        with open(basename, 'rt') as f:
            basetxt = f.read()
        # Set extremely harsh penalties, so CEGB will block most splits.
        cases = [{'cegb_penalty_feature_coupled': [50, 100, 10, 25, 30]},
                 {'cegb_penalty_feature_lazy': [1, 2, 3, 4, 5]},
                 {'cegb_penalty_split': 1}]
        for case in cases:
            booster = lgb.Booster(train_set=ds, params=case)
            for k in range(10):
                booster.update()
            with tempfile.NamedTemporaryFile() as f:
                casename = f.name
            booster.save_model(casename)
            with open(casename, 'rt') as f:
                casetxt = f.read()
            self.assertNotEqual(basetxt, casetxt)

    def test_cegb_scaling_equalities(self):
        """cegb_tradeoff must scale penalties so equivalent configs give identical models.

        Fixes: model files were never removed (temp-file leak), their names came
        from the racy closed-``NamedTemporaryFile`` pattern, and the
        loop-invariant ``self.maxDiff`` assignment is hoisted out of the loop.
        """
        X = np.random.random((100, 5))
        X[:, [1, 3]] = 0
        y = np.random.random(100)
        names = ['col_%d' % i for i in range(5)]
        ds = lgb.Dataset(X, feature_name=names).construct()
        ds.set_label(y)
        self.maxDiff = None
        # Compare pairs of penalties, to ensure scaling works as intended
        pairs = [({'cegb_penalty_feature_coupled': [1, 2, 1, 2, 1]},
                  {'cegb_penalty_feature_coupled': [0.5, 1, 0.5, 1, 0.5], 'cegb_tradeoff': 2}),
                 ({'cegb_penalty_feature_lazy': [0.01, 0.02, 0.03, 0.04, 0.05]},
                  {'cegb_penalty_feature_lazy': [0.005, 0.01, 0.015, 0.02, 0.025], 'cegb_tradeoff': 2}),
                 ({'cegb_penalty_split': 1},
                  {'cegb_penalty_split': 2, 'cegb_tradeoff': 0.5})]
        for (p1, p2) in pairs:
            booster1 = lgb.Booster(train_set=ds, params=p1)
            booster2 = lgb.Booster(train_set=ds, params=p2)
            for k in range(10):
                booster1.update()
                booster2.update()
            fd, p1name = tempfile.mkstemp()
            os.close(fd)
            # Reset booster1's parameters to p2, so the parameter section of the file matches.
            booster1.reset_parameter(p2)
            booster1.save_model(p1name)
            with open(p1name, 'rt') as f:
                p1txt = f.read()
            fd, p2name = tempfile.mkstemp()
            os.close(fd)
            booster2.save_model(p2name)
            with open(p2name, 'rt') as f:
                p2txt = f.read()
            # remove the model files (previously leaked) before comparing
            os.remove(p1name)
            os.remove(p2name)
            self.assertEqual(p1txt, p2txt)
    def test_consistent_state_for_dataset_fields(self):
        """Fields set via the constructor and via setters must agree across accessors."""

        def _assert_fields(ds):
            # label / weight / init_score must round-trip through both accessor styles
            np.testing.assert_allclose(ds.label, ds.get_label())
            np.testing.assert_allclose(ds.label, ds.get_field('label'))
            # the NaN/inf planted at positions 0/1 must not survive in the stored field
            self.assertFalse(np.isnan(ds.label[0]))
            self.assertFalse(np.isinf(ds.label[1]))
            np.testing.assert_allclose(ds.weight, ds.get_weight())
            np.testing.assert_allclose(ds.weight, ds.get_field('weight'))
            self.assertFalse(np.isnan(ds.weight[0]))
            self.assertFalse(np.isinf(ds.weight[1]))
            np.testing.assert_allclose(ds.init_score, ds.get_init_score())
            np.testing.assert_allclose(ds.init_score, ds.get_field('init_score'))
            self.assertFalse(np.isnan(ds.init_score[0]))
            self.assertFalse(np.isinf(ds.init_score[1]))
            # all three fields were fed the same sequence, so position 0 must agree
            self.assertTrue(np.all(np.isclose([ds.label[0], ds.weight[0], ds.init_score[0]],
                                              ds.label[0])))
            self.assertAlmostEqual(ds.label[1], ds.weight[1])
            self.assertListEqual(ds.feature_name, ds.get_feature_name())

        X, y = load_breast_cancer(return_X_y=True)
        sequence = np.ones(y.shape[0])
        sequence[0] = np.nan
        sequence[1] = np.inf
        feature_names = ['f{0}'.format(i) for i in range(X.shape[1])]
        # fields supplied at construction time
        lgb_data = lgb.Dataset(X, sequence,
                               weight=sequence, init_score=sequence,
                               feature_name=feature_names).construct()
        _assert_fields(lgb_data)
        # fields supplied through setters after construction
        lgb_data = lgb.Dataset(X, y).construct()
        lgb_data.set_label(sequence)
        lgb_data.set_weight(sequence)
        lgb_data.set_init_score(sequence)
        lgb_data.set_feature_name(feature_names)
        _assert_fields(lgb_data)