# coding: utf-8
# pylint: disable = invalid-name, C0111
import numpy as np
import pandas as pd
import lightgbm as lgb

from sklearn.metrics import mean_squared_error
from sklearn.model_selection import GridSearchCV

print('Loading data...')

# load or create your dataset (tab-separated, no header row; column 0 is the label)
df_train = pd.read_csv('../regression/regression.train', header=None, sep='\t')
df_test = pd.read_csv('../regression/regression.test', header=None, sep='\t')

# split each frame into label (first column) and features (everything else)
y_train, X_train = df_train[0], df_train.drop(0, axis=1)
y_test, X_test = df_test[0], df_test.drop(0, axis=1)
print('Starting training...')
# train: a small gradient-boosted tree ensemble via the scikit-learn API
gbm = lgb.LGBMRegressor(num_leaves=31,
                        learning_rate=0.05,
                        n_estimators=20)

# NOTE: the `early_stopping_rounds` keyword was removed from the sklearn-API
# fit() in LightGBM 4.0; the equivalent callback form below also works on
# earlier 3.x releases, so it is the portable way to request early stopping.
gbm.fit(X_train, y_train,
        eval_set=[(X_test, y_test)],
        eval_metric='l1',
        callbacks=[lgb.early_stopping(stopping_rounds=5)])
print('Starting predicting...')
# predict, using the best iteration found by early stopping
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration_)

# eval: root mean squared error on the held-out set
rmse_test = mean_squared_error(y_test, y_pred) ** 0.5
print('The rmse of prediction is:', rmse_test)

# feature importances (one split count per feature column)
print('Feature importances:', list(gbm.feature_importances_))
# self-defined eval metric
# f(y_true: array, y_pred: array) -> name: string, eval_result: float, is_higher_better: bool
# Root Mean Squared Logarithmic Error (RMSLE)
def rmsle(y_true, y_pred):
    """Return the LightGBM eval tuple ('RMSLE', score, False) for *y_true*/*y_pred*."""
    # work in log1p space so the metric is scale-insensitive; lower is better
    log_residual = np.log1p(y_pred) - np.log1p(y_true)
    score = np.sqrt(np.mean(np.square(log_residual)))
    return 'RMSLE', score, False

print('Starting training with custom eval function...')
# train, scoring the eval set with the self-defined RMSLE metric.
# NOTE: `early_stopping_rounds` was removed from the sklearn-API fit() in
# LightGBM 4.0; the callback form is the portable replacement.
gbm.fit(X_train, y_train,
        eval_set=[(X_test, y_test)],
        eval_metric=rmsle,
        callbacks=[lgb.early_stopping(stopping_rounds=5)])

# another self-defined eval metric
# f(y_true: array, y_pred: array) -> name: string, eval_result: float, is_higher_better: bool
# Relative Absolute Error (RAE)
def rae(y_true, y_pred):
    """Return the LightGBM eval tuple ('RAE', score, False) for *y_true*/*y_pred*."""
    # absolute error of the model, normalized by the absolute error of the
    # trivial mean predictor; lower is better
    model_error = np.sum(np.abs(y_pred - y_true))
    baseline_error = np.sum(np.abs(np.mean(y_true) - y_true))
    return 'RAE', model_error / baseline_error, False

print('Starting training with multiple custom eval functions...')
# train, scoring the eval set with both custom metrics at once.
# NOTE: `early_stopping_rounds` was removed from the sklearn-API fit() in
# LightGBM 4.0; the callback form is the portable replacement.
gbm.fit(X_train, y_train,
        eval_set=[(X_test, y_test)],
        eval_metric=lambda y_true, y_pred: [rmsle(y_true, y_pred), rae(y_true, y_pred)],
        callbacks=[lgb.early_stopping(stopping_rounds=5)])

print('Starting predicting...')
# predict, using the best iteration found by early stopping
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration_)

# eval: index 1 of each metric tuple is the numeric score
rmsle_score = rmsle(y_test, y_pred)[1]
rae_score = rae(y_test, y_pred)[1]
print('The rmsle of prediction is:', rmsle_score)
print('The rae of prediction is:', rae_score)

# other scikit-learn modules: hyper-parameter search over the regressor
estimator = lgb.LGBMRegressor(num_leaves=31)

# candidate values for the two parameters being tuned
param_grid = {
    'learning_rate': [0.01, 0.1, 1],
    'n_estimators': [20, 40],
}

# 3-fold cross-validated exhaustive search over the grid
gbm = GridSearchCV(estimator, param_grid, cv=3)
gbm.fit(X_train, y_train)

print('Best parameters found by grid search are:', gbm.best_params_)