"tests/git@developer.sourcefind.cn:tianlh/lightgbm-dcu.git" did not exist on "5e90255ee78bbee07d3a1afb01ffa22dfcfe9b6f"
test_basic.py 14.7 KB
Newer Older
wxchan's avatar
wxchan committed
1
# coding: utf-8
wxchan's avatar
wxchan committed
2
3
4
5
6
import os
import tempfile
import unittest

import lightgbm as lgb
wxchan's avatar
wxchan committed
7
import numpy as np
8
9

from scipy import sparse
10
from sklearn.datasets import dump_svmlight_file, load_svmlight_file
wxchan's avatar
wxchan committed
11
from sklearn.model_selection import train_test_split
wxchan's avatar
wxchan committed
12

13
14
from .utils import load_breast_cancer

wxchan's avatar
wxchan committed
15

wxchan's avatar
wxchan committed
16
class TestBasic(unittest.TestCase):
wxchan's avatar
wxchan committed
17

wxchan's avatar
wxchan committed
18
    def test(self):
        """End-to-end smoke test of the low-level Booster API.

        Trains a small binary classifier, then checks: iteration/tree counts,
        score bounds, prediction consistency across input sources (matrix,
        svmlight file, reloaded model file), early-stopped prediction, and
        shape validation on predict inputs.
        """
        X_train, X_test, y_train, y_test = train_test_split(*load_breast_cancer(return_X_y=True),
                                                            test_size=0.1, random_state=2)
        train_data = lgb.Dataset(X_train, label=y_train)
        valid_data = train_data.create_valid(X_test, label=y_test)

        # Deterministic single-threaded config so the exact-value assertions
        # below are reproducible.
        params = {
            "objective": "binary",
            "metric": "auc",
            "min_data": 10,
            "num_leaves": 15,
            "verbose": -1,
            "num_threads": 1,
            "max_bin": 255,
            "gpu_use_dp": True
        }
        bst = lgb.Booster(params, train_data)
        bst.add_valid(valid_data, "valid_1")

        # Train 20 iterations, logging train/valid metrics every 10th round.
        for i in range(20):
            bst.update()
            if i % 10 == 0:
                print(bst.eval_train(), bst.eval_valid())

        self.assertEqual(bst.current_iteration(), 20)
        self.assertEqual(bst.num_trees(), 20)
        self.assertEqual(bst.num_model_per_iteration(), 1)
        # Hard-coded expected bounds; these pin the exact training result for
        # the fixed random_state/params above.
        self.assertAlmostEqual(bst.lower_bound(), -2.9040190126976606)
        self.assertAlmostEqual(bst.upper_bound(), 3.3182142872462883)

        # Predicting from an in-memory matrix and from an svmlight text file
        # must give (numerically) the same scores.
        bst.save_model("model.txt")
        pred_from_matr = bst.predict(X_test)
        # NamedTemporaryFile is used only to obtain a unique path; the file is
        # recreated immediately afterwards.
        with tempfile.NamedTemporaryFile() as f:
            tname = f.name
        with open(tname, "w+b") as f:
            dump_svmlight_file(X_test, y_test, f)
        pred_from_file = bst.predict(tname)
        os.remove(tname)
        np.testing.assert_allclose(pred_from_matr, pred_from_file)

        # check saved model persistence
        bst = lgb.Booster(params, model_file="model.txt")
        os.remove("model.txt")
        pred_from_model_file = bst.predict(X_test)
        # we need to check the consistency of model file here, so test for exact equal
        np.testing.assert_array_equal(pred_from_matr, pred_from_model_file)

        # check early stopping is working. Make it stop very early, so the scores should be very close to zero
        pred_parameter = {"pred_early_stop": True, "pred_early_stop_freq": 5, "pred_early_stop_margin": 1.5}
        pred_early_stopping = bst.predict(X_test, **pred_parameter)
        # scores likely to be different, but prediction should still be the same
        np.testing.assert_array_equal(np.sign(pred_from_matr), np.sign(pred_early_stopping))

        # test that shape is checked during prediction
        bad_X_test = X_test[:, 1:]
        bad_shape_error_msg = "The number of features in data*"
        np.testing.assert_raises_regex(lgb.basic.LightGBMError, bad_shape_error_msg,
                                       bst.predict, bad_X_test)
        np.testing.assert_raises_regex(lgb.basic.LightGBMError, bad_shape_error_msg,
                                       bst.predict, sparse.csr_matrix(bad_X_test))
        np.testing.assert_raises_regex(lgb.basic.LightGBMError, bad_shape_error_msg,
                                       bst.predict, sparse.csc_matrix(bad_X_test))
        # Same shape check must trigger for file-based prediction, both for a
        # file with a missing column and for a 1-based (shifted) svmlight file.
        with open(tname, "w+b") as f:
            dump_svmlight_file(bad_X_test, y_test, f)
        np.testing.assert_raises_regex(lgb.basic.LightGBMError, bad_shape_error_msg,
                                       bst.predict, tname)
        with open(tname, "w+b") as f:
            dump_svmlight_file(X_test, y_test, f, zero_based=False)
        np.testing.assert_raises_regex(lgb.basic.LightGBMError, bad_shape_error_msg,
                                       bst.predict, tname)
        os.remove(tname)

90
    def test_chunked_dataset(self):
        """Dataset construction must accept data supplied as a list of row chunks."""
        X_train, X_test, y_train, y_test = train_test_split(*load_breast_cancer(return_X_y=True), test_size=0.1, random_state=2)

        chunk_size = X_train.shape[0] // 10 + 1

        def split_rows(mat):
            # Slice the matrix into consecutive blocks of at most chunk_size rows.
            n_chunks = mat.shape[0] // chunk_size + 1
            return [mat[k * chunk_size:(k + 1) * chunk_size, :] for k in range(n_chunks)]

        X_train = split_rows(X_train)
        X_test = split_rows(X_test)

        train_data = lgb.Dataset(X_train, label=y_train, params={"bin_construct_sample_cnt": 100})
        valid_data = train_data.create_valid(X_test, label=y_test, params={"bin_construct_sample_cnt": 100})
        train_data.construct()
        valid_data.construct()
101
102

    def test_subset_group(self):
        """Subsetting a ranking Dataset must recompute the query/group boundaries."""
        base_dir = os.path.dirname(os.path.realpath(__file__))
        X_train, y_train = load_svmlight_file(os.path.join(base_dir,
                                                           '../../examples/lambdarank/rank.train'))
        q_train = np.loadtxt(os.path.join(base_dir,
                                          '../../examples/lambdarank/rank.train.query'))
        lgb_train = lgb.Dataset(X_train, y_train, group=q_train)
        # The full lambdarank example has 201 query groups.
        self.assertEqual(len(lgb_train.get_group()), 201)
        # The first 10 rows span exactly two groups of sizes 1 and 9.
        subset = lgb_train.subset(list(range(10))).construct()
        subset_group = subset.get_group()
        self.assertEqual(len(subset_group), 2)
        self.assertEqual(subset_group[0], 1)
        self.assertEqual(subset_group[1], 9)
114
115

    def test_add_features_throws_if_num_data_unequal(self):
        """add_features_from must reject datasets whose row counts differ."""
        features_a = np.random.random((100, 1))
        features_b = np.random.random((10, 1))
        ds_a = lgb.Dataset(features_a).construct()
        ds_b = lgb.Dataset(features_b).construct()
        with self.assertRaises(lgb.basic.LightGBMError):
            ds_a.add_features_from(ds_b)

    def test_add_features_throws_if_datasets_unconstructed(self):
        """add_features_from must raise unless both datasets are constructed."""
        X1 = np.random.random((100, 1))
        X2 = np.random.random((100, 1))
        # Every combination where at least one side is left unconstructed
        # has to fail with ValueError.
        for construct_left, construct_right in ((False, False),
                                                (True, False),
                                                (False, True)):
            with self.assertRaises(ValueError):
                d1 = lgb.Dataset(X1).construct() if construct_left else lgb.Dataset(X1)
                d2 = lgb.Dataset(X2).construct() if construct_right else lgb.Dataset(X2)
                d1.add_features_from(d2)

    def test_add_features_equal_data_on_alternating_used_unused(self):
        """A column-split-then-merged Dataset must dump identically to the unsplit one."""
        self.maxDiff = None

        def dump_to_text(dataset):
            # Dump the dataset to a fresh temporary path and return the text,
            # removing the file afterwards.
            with tempfile.NamedTemporaryFile() as f:
                path = f.name
            dataset._dump_text(path)
            with open(path, 'rt') as handle:
                text = handle.read()
            os.remove(path)
            return text

        X = np.random.random((100, 5))
        # Zero out two columns so used and unused features alternate.
        X[:, [1, 3]] = 0
        names = ['col_%d' % i for i in range(5)]
        for split_at in range(1, 5):
            left = lgb.Dataset(X[:, :split_at], feature_name=names[:split_at]).construct()
            right = lgb.Dataset(X[:, split_at:], feature_name=names[split_at:]).construct()
            left.add_features_from(right)
            merged_txt = dump_to_text(left)
            full_txt = dump_to_text(lgb.Dataset(X, feature_name=names).construct())
            self.assertEqual(full_txt, merged_txt)

    def test_add_features_same_booster_behaviour(self):
        """Training on a merged (add_features_from) dataset and on the
        equivalent monolithic dataset must produce identical saved models.
        """
        self.maxDiff = None
        X = np.random.random((100, 5))
        # Zero out two columns so used and unused features alternate.
        X[:, [1, 3]] = 0
        names = ['col_%d' % i for i in range(5)]
        for j in range(1, 5):
            d1 = lgb.Dataset(X[:, :j], feature_name=names[:j]).construct()
            d2 = lgb.Dataset(X[:, j:], feature_name=names[j:]).construct()
            d1.add_features_from(d2)
            d = lgb.Dataset(X, feature_name=names).construct()
            y = np.random.random(100)
            d1.set_label(y)
            d.set_label(y)
            b1 = lgb.Booster(train_set=d1)
            b = lgb.Booster(train_set=d)
            for k in range(10):
                b.update()
                b1.update()
            # NamedTemporaryFile is used only to obtain unique paths.
            with tempfile.NamedTemporaryFile() as df:
                dname = df.name
            with tempfile.NamedTemporaryFile() as d1f:
                d1name = d1f.name
            b1.save_model(d1name)
            b.save_model(dname)
            with open(dname, 'rt') as df:
                dtxt = df.read()
            with open(d1name, 'rt') as d1f:
                d1txt = d1f.read()
            # Remove the temporary model dumps; previously they were left
            # behind after the test run (sibling tests in this file clean up).
            os.remove(dname)
            os.remove(d1name)
            self.assertEqual(dtxt, d1txt)

Guolin Ke's avatar
Guolin Ke committed
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
    @unittest.skipIf(not lgb.compat.PANDAS_INSTALLED, 'pandas is not installed')
    def test_add_features_from_different_sources(self):
        """add_features_from must work across ndarray/sparse/DataFrame sources.

        Checks three behaviors:
        * merging works even when raw data has been freed (result data is None),
        * merging datasets whose raw data cannot be combined sets data to None,
        * merging mixed source types keeps the left operand's data type and
          renames the appended features with a ``D{index}_`` prefix.
        """
        import pandas as pd
        n_row = 100
        n_col = 5
        X = np.random.random((n_row, n_col))
        xxs = [X, sparse.csr_matrix(X), pd.DataFrame(X)]
        names = ['col_%d' % i for i in range(n_col)]
        for x_1 in xxs:
            # test that method works even with free_raw_data=True
            d1 = lgb.Dataset(x_1, feature_name=names, free_raw_data=True).construct()
            d2 = lgb.Dataset(x_1, feature_name=names, free_raw_data=True).construct()
            d1.add_features_from(d2)
            self.assertIsNone(d1.data)

            # test that method works but sets raw data to None in case of immergeable data types
            d1 = lgb.Dataset(x_1, feature_name=names, free_raw_data=False).construct()
            d2 = lgb.Dataset([X[:n_row // 2, :], X[n_row // 2:, :]],
                             feature_name=names, free_raw_data=False).construct()
            d1.add_features_from(d2)
            self.assertIsNone(d1.data)

            # test that method works for different data types
            d1 = lgb.Dataset(x_1, feature_name=names, free_raw_data=False).construct()
            res_feature_names = list(names)
            # NOTE: the original code had dead `idx = 1` / `idx += 1`
            # statements; `idx` is fully controlled by enumerate() below.
            for idx, x_2 in enumerate(xxs, 2):
                original_type = type(d1.get_data())
                d2 = lgb.Dataset(x_2, feature_name=names, free_raw_data=False).construct()
                d1.add_features_from(d2)
                self.assertIsInstance(d1.get_data(), original_type)
                self.assertTupleEqual(d1.get_data().shape, (n_row, n_col * idx))
                res_feature_names += ['D{}_{}'.format(idx, name) for name in names]
                self.assertListEqual(d1.feature_name, res_feature_names)

229
    def test_cegb_affects_behavior(self):
        """Enabling any CEGB penalty must change the trained model.

        Trains a baseline booster, then re-trains with extremely harsh CEGB
        penalties; each resulting model file must differ from the baseline.
        """
        X = np.random.random((100, 5))
        X[:, [1, 3]] = 0
        y = np.random.random(100)
        names = ['col_%d' % i for i in range(5)]
        ds = lgb.Dataset(X, feature_name=names).construct()
        ds.set_label(y)
        base = lgb.Booster(train_set=ds)
        for k in range(10):
            base.update()
        # NamedTemporaryFile is used only to obtain a unique path.
        with tempfile.NamedTemporaryFile() as f:
            basename = f.name
        base.save_model(basename)
        with open(basename, 'rt') as f:
            basetxt = f.read()
        # Clean up the baseline dump (previously left behind).
        os.remove(basename)
        # Set extremely harsh penalties, so CEGB will block most splits.
        cases = [{'cegb_penalty_feature_coupled': [50, 100, 10, 25, 30]},
                 {'cegb_penalty_feature_lazy': [1, 2, 3, 4, 5]},
                 {'cegb_penalty_split': 1}]
        for case in cases:
            booster = lgb.Booster(train_set=ds, params=case)
            for k in range(10):
                booster.update()
            with tempfile.NamedTemporaryFile() as f:
                casename = f.name
            booster.save_model(casename)
            with open(casename, 'rt') as f:
                casetxt = f.read()
            # Remove the per-case dump (previously one file leaked per case).
            os.remove(casename)
            self.assertNotEqual(basetxt, casetxt)

    def test_cegb_scaling_equalities(self):
        """cegb_tradeoff must scale the CEGB penalties linearly.

        Each pair below holds two parameterizations that are mathematically
        equivalent (e.g. penalties halved while the tradeoff is doubled);
        boosters trained with them must serialize to identical model files.
        """
        # Show full diffs if the model texts ever diverge.
        self.maxDiff = None
        X = np.random.random((100, 5))
        X[:, [1, 3]] = 0
        y = np.random.random(100)
        names = ['col_%d' % i for i in range(5)]
        ds = lgb.Dataset(X, feature_name=names).construct()
        ds.set_label(y)
        # Compare pairs of penalties, to ensure scaling works as intended
        pairs = [({'cegb_penalty_feature_coupled': [1, 2, 1, 2, 1]},
                  {'cegb_penalty_feature_coupled': [0.5, 1, 0.5, 1, 0.5], 'cegb_tradeoff': 2}),
                 ({'cegb_penalty_feature_lazy': [0.01, 0.02, 0.03, 0.04, 0.05]},
                  {'cegb_penalty_feature_lazy': [0.005, 0.01, 0.015, 0.02, 0.025], 'cegb_tradeoff': 2}),
                 ({'cegb_penalty_split': 1},
                  {'cegb_penalty_split': 2, 'cegb_tradeoff': 0.5})]
        for (p1, p2) in pairs:
            booster1 = lgb.Booster(train_set=ds, params=p1)
            booster2 = lgb.Booster(train_set=ds, params=p2)
            for k in range(10):
                booster1.update()
                booster2.update()
            with tempfile.NamedTemporaryFile() as f:
                p1name = f.name
            # Reset booster1's parameters to p2, so the parameter section of the file matches.
            booster1.reset_parameter(p2)
            booster1.save_model(p1name)
            with open(p1name, 'rt') as f:
                p1txt = f.read()
            with tempfile.NamedTemporaryFile() as f:
                p2name = f.name
            booster2.save_model(p2name)
            with open(p2name, 'rt') as f:
                p2txt = f.read()
            # Remove the temporary dumps (previously left behind per pair).
            os.remove(p1name)
            os.remove(p2name)
            self.assertEqual(p1txt, p2txt)
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311

    def test_consistent_state_for_dataset_fields(self):
        """Cached field attributes must stay in sync with the getter APIs."""

        def verify(ds):
            # Each cached attribute must round-trip through both its getter
            # and get_field, and the NaN/inf entries set below must have been
            # sanitized by the Dataset.
            for attr, getter in (('label', ds.get_label),
                                 ('weight', ds.get_weight),
                                 ('init_score', ds.get_init_score)):
                values = getattr(ds, attr)
                np.testing.assert_allclose(values, getter())
                np.testing.assert_allclose(values, ds.get_field(attr))
                self.assertFalse(np.isnan(values[0]))
                self.assertFalse(np.isinf(values[1]))
            self.assertTrue(np.all(np.isclose([ds.label[0], ds.weight[0], ds.init_score[0]],
                                              ds.label[0])))
            self.assertAlmostEqual(ds.label[1], ds.weight[1])
            self.assertListEqual(ds.feature_name, ds.get_feature_name())

        X, y = load_breast_cancer(return_X_y=True)
        sequence = np.ones(y.shape[0])
        sequence[0] = np.nan
        sequence[1] = np.inf
        feature_names = ['f{0}'.format(i) for i in range(X.shape[1])]
        # Fields supplied through the constructor...
        lgb_data = lgb.Dataset(X, sequence,
                               weight=sequence, init_score=sequence,
                               feature_name=feature_names).construct()
        verify(lgb_data)
        # ...and fields supplied through the setters must behave identically.
        lgb_data = lgb.Dataset(X, y).construct()
        lgb_data.set_label(sequence)
        lgb_data.set_weight(sequence)
        lgb_data.set_init_score(sequence)
        lgb_data.set_feature_name(feature_names)
        verify(lgb_data)