# coding: utf-8
import os
import tempfile
import unittest

import lightgbm as lgb
import numpy as np

from scipy import sparse
from sklearn.datasets import load_breast_cancer, dump_svmlight_file, load_svmlight_file
from sklearn.model_selection import train_test_split
class TestBasic(unittest.TestCase):

    def test(self):
        """Exercise the core Booster workflow end to end.

        Trains a small binary classifier, then checks iteration/tree counts,
        prediction bounds, prediction from an svmlight file, model save/load
        round-trip, prediction early stopping, and that a feature-count
        mismatch is rejected for every input type.
        """
        X_train, X_test, y_train, y_test = train_test_split(*load_breast_cancer(return_X_y=True),
                                                            test_size=0.1, random_state=2)
        train_data = lgb.Dataset(X_train, label=y_train)
        valid_data = train_data.create_valid(X_test, label=y_test)

        params = {
            "objective": "binary",
            "metric": "auc",
            "min_data": 10,
            "num_leaves": 15,
            "verbose": -1,
            "num_threads": 1,
            "max_bin": 255,
            "gpu_use_dp": True
        }
        bst = lgb.Booster(params, train_data)
        bst.add_valid(valid_data, "valid_1")

        for i in range(20):
            bst.update()
            if i % 10 == 0:
                print(bst.eval_train(), bst.eval_valid())

        self.assertEqual(bst.current_iteration(), 20)
        self.assertEqual(bst.num_trees(), 20)
        self.assertEqual(bst.num_model_per_iteration(), 1)
        self.assertAlmostEqual(bst.lower_bound(), -2.9040190126976606)
        self.assertAlmostEqual(bst.upper_bound(), 3.3182142872462883)

        bst.save_model("model.txt")
        pred_from_matr = bst.predict(X_test)
        # mkstemp yields a name we can safely reopen and reuse; the old
        # NamedTemporaryFile-then-reopen pattern was racy (another process
        # could claim the name after the handle closed) and fails on Windows.
        fd, tname = tempfile.mkstemp()
        try:
            with os.fdopen(fd, "w+b") as f:
                dump_svmlight_file(X_test, y_test, f)
            pred_from_file = bst.predict(tname)
            np.testing.assert_allclose(pred_from_matr, pred_from_file)

            # check saved model persistence
            bst = lgb.Booster(params, model_file="model.txt")
            os.remove("model.txt")
            pred_from_model_file = bst.predict(X_test)
            # we need to check the consistency of model file here, so test for exact equal
            np.testing.assert_array_equal(pred_from_matr, pred_from_model_file)

            # check early stopping is working. Make it stop very early, so the scores should be very close to zero
            pred_parameter = {"pred_early_stop": True, "pred_early_stop_freq": 5, "pred_early_stop_margin": 1.5}
            pred_early_stopping = bst.predict(X_test, **pred_parameter)
            # scores likely to be different, but prediction should still be the same
            np.testing.assert_array_equal(np.sign(pred_from_matr), np.sign(pred_early_stopping))

            # test that shape is checked during prediction
            bad_X_test = X_test[:, 1:]
            bad_shape_error_msg = "The number of features in data*"
            np.testing.assert_raises_regex(lgb.basic.LightGBMError, bad_shape_error_msg,
                                           bst.predict, bad_X_test)
            np.testing.assert_raises_regex(lgb.basic.LightGBMError, bad_shape_error_msg,
                                           bst.predict, sparse.csr_matrix(bad_X_test))
            np.testing.assert_raises_regex(lgb.basic.LightGBMError, bad_shape_error_msg,
                                           bst.predict, sparse.csc_matrix(bad_X_test))
            with open(tname, "w+b") as f:
                dump_svmlight_file(bad_X_test, y_test, f)
            np.testing.assert_raises_regex(lgb.basic.LightGBMError, bad_shape_error_msg,
                                           bst.predict, tname)
            # zero_based=False shifts feature indices up by one, which also
            # presents as a feature-count mismatch to the loader
            with open(tname, "w+b") as f:
                dump_svmlight_file(X_test, y_test, f, zero_based=False)
            np.testing.assert_raises_regex(lgb.basic.LightGBMError, bad_shape_error_msg,
                                           bst.predict, tname)
        finally:
            if os.path.exists(tname):
                os.remove(tname)
    def test_chunked_dataset(self):
        """Dataset construction should accept a list of row-chunks in place of one matrix."""
        X_train, X_test, y_train, y_test = train_test_split(*load_breast_cancer(return_X_y=True), test_size=0.1, random_state=2)

        chunk_size = X_train.shape[0] // 10 + 1

        def as_row_chunks(mat):
            # Slice rows into consecutive chunk_size-sized pieces (the final
            # slice may be short, or empty when rows divide evenly).
            n_chunks = mat.shape[0] // chunk_size + 1
            return [mat[k * chunk_size:(k + 1) * chunk_size, :] for k in range(n_chunks)]

        train_data = lgb.Dataset(as_row_chunks(X_train), label=y_train,
                                 params={"bin_construct_sample_cnt": 100})
        valid_data = train_data.create_valid(as_row_chunks(X_test), label=y_test,
                                             params={"bin_construct_sample_cnt": 100})
        train_data.construct()
        valid_data.construct()
    def test_subset_group(self):
        """Dataset.subset() must carry group (query) boundaries over correctly."""
        rank_example_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                        '../../examples/lambdarank')
        X_train, y_train = load_svmlight_file(os.path.join(rank_example_dir, 'rank.train'))
        q_train = np.loadtxt(os.path.join(rank_example_dir, 'rank.train.query'))
        lgb_train = lgb.Dataset(X_train, y_train, group=q_train)
        self.assertEqual(len(lgb_train.get_group()), 201)
        # The first 10 rows straddle the first group boundary: 1 row from
        # group 0 and 9 rows from group 1.
        subset = lgb_train.subset(list(range(10))).construct()
        subset_group = subset.get_group()
        self.assertEqual(len(subset_group), 2)
        self.assertEqual(subset_group[0], 1)
        self.assertEqual(subset_group[1], 9)
    def test_add_features_throws_if_num_data_unequal(self):
        """add_features_from must reject Datasets with mismatched row counts."""
        first = lgb.Dataset(np.random.random((100, 1))).construct()
        second = lgb.Dataset(np.random.random((10, 1))).construct()
        with self.assertRaises(lgb.basic.LightGBMError):
            first.add_features_from(second)
    def test_add_features_throws_if_datasets_unconstructed(self):
        """add_features_from requires both Datasets to already be constructed."""
        X1 = np.random.random((100, 1))
        X2 = np.random.random((100, 1))
        # Every combination with at least one unconstructed side must fail.
        for construct_first, construct_second in ((False, False), (True, False), (False, True)):
            with self.assertRaises(ValueError):
                d1 = lgb.Dataset(X1)
                if construct_first:
                    d1 = d1.construct()
                d2 = lgb.Dataset(X2)
                if construct_second:
                    d2 = d2.construct()
                d1.add_features_from(d2)
    def test_add_features_equal_data_on_alternating_used_unused(self):
        """Merging column-split Datasets must dump the same text as the unsplit Dataset.

        Columns 1 and 3 are zeroed so the split points alternate between used
        and unused features.
        """
        self.maxDiff = None
        X = np.random.random((100, 5))
        X[:, [1, 3]] = 0
        names = ['col_%d' % i for i in range(5)]
        for split_at in range(1, 5):
            left = lgb.Dataset(X[:, :split_at], feature_name=names[:split_at]).construct()
            right = lgb.Dataset(X[:, split_at:], feature_name=names[split_at:]).construct()
            left.add_features_from(right)
            with tempfile.NamedTemporaryFile() as f:
                merged_path = f.name
            left._dump_text(merged_path)
            whole = lgb.Dataset(X, feature_name=names).construct()
            with tempfile.NamedTemporaryFile() as f:
                whole_path = f.name
            whole._dump_text(whole_path)
            with open(merged_path, 'rt') as fh:
                merged_txt = fh.read()
            with open(whole_path, 'rt') as fh:
                whole_txt = fh.read()
            os.remove(whole_path)
            os.remove(merged_path)
            self.assertEqual(whole_txt, merged_txt)
    def test_add_features_same_booster_behaviour(self):
        """Boosters trained on a merged (column-split then re-joined) Dataset and
        on the equivalent single Dataset must save identical model files.
        """
        self.maxDiff = None
        X = np.random.random((100, 5))
        X[:, [1, 3]] = 0
        names = ['col_%d' % i for i in range(5)]
        for j in range(1, 5):
            d1 = lgb.Dataset(X[:, :j], feature_name=names[:j]).construct()
            d2 = lgb.Dataset(X[:, j:], feature_name=names[j:]).construct()
            d1.add_features_from(d2)
            d = lgb.Dataset(X, feature_name=names).construct()
            y = np.random.random(100)
            d1.set_label(y)
            d.set_label(y)
            b1 = lgb.Booster(train_set=d1)
            b = lgb.Booster(train_set=d)
            for k in range(10):
                b.update()
                b1.update()
            # mkstemp avoids the NamedTemporaryFile name-reuse race; the files
            # are removed afterwards (the original leaked one pair per split).
            dfd, dname = tempfile.mkstemp()
            d1fd, d1name = tempfile.mkstemp()
            os.close(dfd)
            os.close(d1fd)
            try:
                b1.save_model(d1name)
                b.save_model(dname)
                with open(dname, 'rt') as df:
                    dtxt = df.read()
                with open(d1name, 'rt') as d1f:
                    d1txt = d1f.read()
            finally:
                os.remove(dname)
                os.remove(d1name)
            self.assertEqual(dtxt, d1txt)
    @unittest.skipIf(not lgb.compat.PANDAS_INSTALLED, 'pandas is not installed')
    def test_add_features_from_different_sources(self):
        """add_features_from across ndarray / CSR / DataFrame source Datasets.

        Checks raw-data retention rules (free_raw_data, immergeable chunked
        sources) and that merging preserves the left Dataset's data type while
        renaming incoming features to 'D<idx>_<name>'.
        """
        import pandas as pd
        n_row = 100
        n_col = 5
        X = np.random.random((n_row, n_col))
        xxs = [X, sparse.csr_matrix(X), pd.DataFrame(X)]
        names = ['col_%d' % i for i in range(n_col)]
        for x_1 in xxs:
            # test that method works even with free_raw_data=True
            d1 = lgb.Dataset(x_1, feature_name=names, free_raw_data=True).construct()
            d2 = lgb.Dataset(x_1, feature_name=names, free_raw_data=True).construct()
            d1.add_features_from(d2)
            self.assertIsNone(d1.data)

            # test that method works but sets raw data to None in case of immergeable data types
            d1 = lgb.Dataset(x_1, feature_name=names, free_raw_data=False).construct()
            d2 = lgb.Dataset([X[:n_row // 2, :], X[n_row // 2:, :]],
                             feature_name=names, free_raw_data=False).construct()
            d1.add_features_from(d2)
            self.assertIsNone(d1.data)

            # test that method works for different data types
            d1 = lgb.Dataset(x_1, feature_name=names, free_raw_data=False).construct()
            res_feature_names = list(names)
            # enumerate supplies idx directly; the original's manual
            # `idx = 1` / `idx += 1` bookkeeping was dead code.
            for idx, x_2 in enumerate(xxs, 2):
                original_type = type(d1.get_data())
                d2 = lgb.Dataset(x_2, feature_name=names, free_raw_data=False).construct()
                d1.add_features_from(d2)
                self.assertIsInstance(d1.get_data(), original_type)
                self.assertTupleEqual(d1.get_data().shape, (n_row, n_col * idx))
                res_feature_names += ['D{}_{}'.format(idx, name) for name in names]
                self.assertListEqual(d1.feature_name, res_feature_names)
    def test_cegb_affects_behavior(self):
        """Harsh CEGB penalties must change the trained model relative to an
        unpenalised baseline (compared via saved model text).
        """
        X = np.random.random((100, 5))
        X[:, [1, 3]] = 0
        y = np.random.random(100)
        names = ['col_%d' % i for i in range(5)]
        ds = lgb.Dataset(X, feature_name=names).construct()
        ds.set_label(y)
        base = lgb.Booster(train_set=ds)
        for k in range(10):
            base.update()
        # mkstemp avoids the NamedTemporaryFile name-reuse race; temp model
        # files are removed afterwards (the original leaked every one).
        fd, basename = tempfile.mkstemp()
        os.close(fd)
        try:
            base.save_model(basename)
            with open(basename, 'rt') as f:
                basetxt = f.read()
        finally:
            os.remove(basename)
        # Set extremely harsh penalties, so CEGB will block most splits.
        cases = [{'cegb_penalty_feature_coupled': [50, 100, 10, 25, 30]},
                 {'cegb_penalty_feature_lazy': [1, 2, 3, 4, 5]},
                 {'cegb_penalty_split': 1}]
        for case in cases:
            booster = lgb.Booster(train_set=ds, params=case)
            for k in range(10):
                booster.update()
            fd, casename = tempfile.mkstemp()
            os.close(fd)
            try:
                booster.save_model(casename)
                with open(casename, 'rt') as f:
                    casetxt = f.read()
            finally:
                os.remove(casename)
            self.assertNotEqual(basetxt, casetxt)
    def test_cegb_scaling_equalities(self):
        """cegb_tradeoff must rescale penalties: each penalty set and its
        tradeoff-scaled counterpart should train byte-identical models.
        """
        X = np.random.random((100, 5))
        X[:, [1, 3]] = 0
        y = np.random.random(100)
        names = ['col_%d' % i for i in range(5)]
        ds = lgb.Dataset(X, feature_name=names).construct()
        ds.set_label(y)
        # Compare pairs of penalties, to ensure scaling works as intended
        pairs = [({'cegb_penalty_feature_coupled': [1, 2, 1, 2, 1]},
                  {'cegb_penalty_feature_coupled': [0.5, 1, 0.5, 1, 0.5], 'cegb_tradeoff': 2}),
                 ({'cegb_penalty_feature_lazy': [0.01, 0.02, 0.03, 0.04, 0.05]},
                  {'cegb_penalty_feature_lazy': [0.005, 0.01, 0.015, 0.02, 0.025], 'cegb_tradeoff': 2}),
                 ({'cegb_penalty_split': 1},
                  {'cegb_penalty_split': 2, 'cegb_tradeoff': 0.5})]
        for (p1, p2) in pairs:
            booster1 = lgb.Booster(train_set=ds, params=p1)
            booster2 = lgb.Booster(train_set=ds, params=p2)
            for k in range(10):
                booster1.update()
                booster2.update()
            # mkstemp avoids the NamedTemporaryFile name-reuse race; temp model
            # files are removed afterwards (the original leaked them).
            p1fd, p1name = tempfile.mkstemp()
            p2fd, p2name = tempfile.mkstemp()
            os.close(p1fd)
            os.close(p2fd)
            try:
                # Reset booster1's parameters to p2, so the parameter section of the file matches.
                booster1.reset_parameter(p2)
                booster1.save_model(p1name)
                with open(p1name, 'rt') as f:
                    p1txt = f.read()
                booster2.save_model(p2name)
                with open(p2name, 'rt') as f:
                    p2txt = f.read()
            finally:
                os.remove(p1name)
                os.remove(p2name)
            self.maxDiff = None
            self.assertEqual(p1txt, p2txt)
    def test_consistent_state_for_dataset_fields(self):
        """Fields set via constructor or via setters must agree across the
        attribute, the get_* accessor, and get_field; nan/inf inputs must be
        sanitised the same way in both paths.
        """

        def check_asserts(data):
            # The three numeric fields obey identical consistency rules.
            for attr, getter in (('label', data.get_label),
                                 ('weight', data.get_weight),
                                 ('init_score', data.get_init_score)):
                values = getattr(data, attr)
                np.testing.assert_allclose(values, getter())
                np.testing.assert_allclose(values, data.get_field(attr))
                self.assertFalse(np.isnan(values[0]))
                self.assertFalse(np.isinf(values[1]))
            self.assertTrue(np.all(np.isclose([data.label[0], data.weight[0], data.init_score[0]],
                                              data.label[0])))
            self.assertAlmostEqual(data.label[1], data.weight[1])
            self.assertListEqual(data.feature_name, data.get_feature_name())

        X, y = load_breast_cancer(return_X_y=True)
        # nan/inf in the first two slots exercise field sanitisation
        probe = np.ones(y.shape[0])
        probe[0] = np.nan
        probe[1] = np.inf
        feature_names = ['f{0}'.format(i) for i in range(X.shape[1])]

        # fields supplied at construction time
        lgb_data = lgb.Dataset(X, probe,
                               weight=probe, init_score=probe,
                               feature_name=feature_names).construct()
        check_asserts(lgb_data)

        # fields supplied via setters after construction
        lgb_data = lgb.Dataset(X, y).construct()
        lgb_data.set_label(probe)
        lgb_data.set_weight(probe)
        lgb_data.set_init_score(probe)
        lgb_data.set_feature_name(feature_names)
        check_asserts(lgb_data)