# coding: utf-8
import json
import pickle

import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error

import lightgbm as lgb

print('Loading data...')
# load or create your dataset
df_train = pd.read_csv('../binary_classification/binary.train', header=None, sep='\t')
df_test = pd.read_csv('../binary_classification/binary.test', header=None, sep='\t')
W_train = pd.read_csv('../binary_classification/binary.train.weight', header=None)[0]
W_test = pd.read_csv('../binary_classification/binary.test.weight', header=None)[0]

y_train = df_train[0]
y_test = df_test[0]
X_train = df_train.drop(0, axis=1)
X_test = df_test.drop(0, axis=1)

num_train, num_feature = X_train.shape

# create dataset for lightgbm
# if you want to re-use data, remember to set free_raw_data=False
lgb_train = lgb.Dataset(X_train, y_train,
                        weight=W_train, free_raw_data=False)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train,
                       weight=W_test, free_raw_data=False)
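
# because free_raw_data=False keeps the raw data around, the same Dataset can
# be sliced again later; a minimal sketch, assuming the Dataset.subset() method
# of the LightGBM Python API (the subset is built for illustration only and is
# not used below)
lgb_train_half = lgb_train.subset(list(range(num_train // 2)))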

# specify your configurations as a dict
params = {
    'boosting_type': 'gbdt',
    'objective': 'binary',
    'metric': 'binary_logloss',
    'num_leaves': 31,
    'learning_rate': 0.05,
    'feature_fraction': 0.9,
    'bagging_fraction': 0.8,
    'bagging_freq': 5,
    'verbose': 0
}

# generate feature names
feature_name = ['feature_' + str(col) for col in range(num_feature)]

print('Starting training...')
# feature_name and categorical_feature
gbm = lgb.train(params,
                lgb_train,
                num_boost_round=10,
                valid_sets=lgb_train,  # eval training data
                feature_name=feature_name,
                categorical_feature=[21])

print('Finished first 10 rounds...')
# check feature name
print('7th feature name is:', lgb_train.feature_name[6])

print('Saving model...')
# save model to file
gbm.save_model('model.txt')
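
# save_model can also write out only the first N rounds via its num_iteration
# argument; a minimal sketch with a hypothetical file name
gbm.save_model('model_first5.txt', num_iteration=5)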

print('Dumping model to JSON...')
# dump model to JSON (and save to file)
model_json = gbm.dump_model()

with open('model.json', 'w+') as f:
    json.dump(model_json, f, indent=4)
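
# a small sanity check on the dumped dict, assuming the 'tree_info' key that
# dump_model() exposes in recent LightGBM versions
print('Number of trees in dumped model:', len(model_json['tree_info']))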

# feature names
print('Feature names:', gbm.feature_name())

# feature importances
print('Feature importances:', list(gbm.feature_importance()))
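
# feature_importance() counts splits by default; importance_type='gain'
# reports the total gain of splits using each feature instead
print('Feature importances (gain):', list(gbm.feature_importance(importance_type='gain')))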

print('Loading model to predict...')
# load model to predict
bst = lgb.Booster(model_file='model.txt')
# a model loaded from file can only predict with the best iteration (or the iteration at saving time)
y_pred = bst.predict(X_test)
# eval with loaded model
print("The rmse of loaded model's prediction is:", mean_squared_error(y_test, y_pred) ** 0.5)

print('Dumping and loading model with pickle...')
# dump model with pickle
with open('model.pkl', 'wb') as fout:
    pickle.dump(gbm, fout)
# load model with pickle to predict
with open('model.pkl', 'rb') as fin:
    pkl_bst = pickle.load(fin)
# a model loaded via pickle can predict with any iteration
y_pred = pkl_bst.predict(X_test, num_iteration=7)
# eval with loaded model
print("The rmse of pickled model's prediction is:", mean_squared_error(y_test, y_pred) ** 0.5)

# continue training
# init_model accepts:
# 1. model file name
# 2. Booster()
gbm = lgb.train(params,
                lgb_train,
                num_boost_round=10,
                init_model='model.txt',
                valid_sets=lgb_eval)

print('Finished 10 - 20 rounds with model file...')

# decay learning rates
# learning_rates accepts:
# 1. list/tuple with length = num_boost_round
# 2. function(curr_iter)
gbm = lgb.train(params,
                lgb_train,
                num_boost_round=10,
                init_model=gbm,
                learning_rates=lambda iter: 0.05 * (0.99 ** iter),
                valid_sets=lgb_eval)

print('Finished 20 - 30 rounds with decay learning rates...')
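
# the list/tuple form mentioned above would express the same schedule
# explicitly; built here only for illustration (it could be passed as
# learning_rates=decayed_rates, with length equal to num_boost_round)
decayed_rates = [0.05 * (0.99 ** i) for i in range(10)]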

# change other parameters during training
gbm = lgb.train(params,
                lgb_train,
                num_boost_round=10,
                init_model=gbm,
                valid_sets=lgb_eval,
                callbacks=[lgb.reset_parameter(bagging_fraction=[0.7] * 5 + [0.6] * 5)])

print('Finished 30 - 40 rounds with changing bagging_fraction...')
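
# reset_parameter also accepts a function of the iteration index instead of a
# list; a minimal sketch of an equivalent schedule (the callback object is
# created here for illustration and not passed to a training run)
step_bagging = lgb.reset_parameter(bagging_fraction=lambda iter: 0.7 if iter < 5 else 0.6)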


# self-defined objective function
# f(preds: array, train_data: Dataset) -> grad: array, hess: array
# log likelihood loss
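# derivation of the pair returned below: with p = sigmoid(s) and the loss
# L = -(y * log(p) + (1 - y) * log(1 - p)), differentiating w.r.t. the raw
# score s gives grad = dL/ds = p - y and hess = d2L/ds2 = p * (1 - p)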
def loglikelihood(preds, train_data):
    labels = train_data.get_label()
    preds = 1. / (1. + np.exp(-preds))
    grad = preds - labels
    hess = preds * (1. - preds)
    return grad, hess


# self-defined eval metric
# f(preds: array, train_data: Dataset) -> name: string, eval_result: float, is_higher_better: bool
# binary error
# NOTE: when you use a customized objective function, the default prediction value is the raw margin
# This may make the built-in evaluation metrics compute wrong results
# For example, with log-likelihood loss the predictions are raw scores before the logistic transformation
# Keep this in mind when you use these customizations
def binary_error(preds, train_data):
    labels = train_data.get_label()
    preds = 1. / (1. + np.exp(-preds))
    return 'error', np.mean(labels != (preds > 0.5)), False


gbm = lgb.train(params,
                lgb_train,
                num_boost_round=10,
                init_model=gbm,
                fobj=loglikelihood,
                feval=binary_error,
                valid_sets=lgb_eval)

print('Finished 40 - 50 rounds with self-defined objective function and eval metric...')


# another self-defined eval metric
# f(preds: array, train_data: Dataset) -> name: string, eval_result: float, is_higher_better: bool
# accuracy
# NOTE: as above, predictions are raw margin scores when a customized objective is used
def accuracy(preds, train_data):
    labels = train_data.get_label()
    preds = 1. / (1. + np.exp(-preds))
    return 'accuracy', np.mean(labels == (preds > 0.5)), True


gbm = lgb.train(params,
                lgb_train,
                num_boost_round=10,
                init_model=gbm,
                fobj=loglikelihood,
                feval=[binary_error, accuracy],
                valid_sets=lgb_eval)

print('Finished 50 - 60 rounds with self-defined objective function '
      'and multiple self-defined eval metrics...')

print('Starting a new training job...')


# self-defined callback that adds a new validation set in the middle of training
def reset_metrics():
    def callback(env):
        if env.iteration - env.begin_iteration == 5:
            print('Add a new valid dataset at iteration 5...')
            # build the extra validation set only at the iteration where it is added
            lgb_eval_new = lgb.Dataset(X_test, y_test, reference=lgb_train)
            env.model.add_valid(lgb_eval_new, 'new_valid')
    callback.before_iteration = True
    callback.order = 0
    return callback
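

# a second minimal callback sketch, using the documented env.iteration field of
# the callback environment (defined for illustration; not passed to the
# training call below)
def log_iteration():
    def callback(env):
        print('finished iteration', env.iteration + 1)
    callback.order = 10
    return callback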


gbm = lgb.train(params,
                lgb_train,
                num_boost_round=10,
                valid_sets=lgb_train,
                callbacks=[reset_metrics()])

print('Finished first 10 rounds with callback function...')