sklearn_example.py 2.59 KB
Newer Older
wxchan's avatar
wxchan committed
1
# coding: utf-8
2
import numpy as np
wxchan's avatar
wxchan committed
3
4
import pandas as pd
from sklearn.metrics import mean_squared_error
5
from sklearn.model_selection import GridSearchCV
wxchan's avatar
wxchan committed
6

7
8
import lightgbm as lgb

9
print('Loading data...')
# Load the example regression dataset: tab-separated, no header row,
# column 0 holds the regression target, the remaining columns are features.
df_train = pd.read_csv('../regression/regression.train', header=None, sep='\t')
df_test = pd.read_csv('../regression/regression.test', header=None, sep='\t')

# Split each frame into feature matrix and target vector.
X_train, y_train = df_train.drop(columns=0), df_train[0]
X_test, y_test = df_test.drop(columns=0), df_test[0]
print('Starting training...')
# Train a small gradient-boosted regressor via the scikit-learn API.
gbm = lgb.LGBMRegressor(num_leaves=31,
                        learning_rate=0.05,
                        n_estimators=20)
# NOTE: the `early_stopping_rounds` fit() argument was deprecated in
# LightGBM 3.3 and removed in 4.0; early stopping is now configured
# through the `callbacks` argument instead.
gbm.fit(X_train, y_train,
        eval_set=[(X_test, y_test)],
        eval_metric='l1',
        callbacks=[lgb.early_stopping(stopping_rounds=5)])

print('Starting predicting...')
# Predict using the best iteration found by early stopping.
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration_)
# Evaluate: RMSE on the held-out test set.
rmse_test = mean_squared_error(y_test, y_pred) ** 0.5
print(f'The RMSE of prediction is: {rmse_test}')

# feature importances
print(f'Feature importances: {list(gbm.feature_importances_)}')
# self-defined eval metric
# f(y_true: array, y_pred: array) -> name: string, eval_result: float, is_higher_better: bool
# Root Mean Squared Logarithmic Error (RMSLE)
def rmsle(y_true, y_pred):
    """Custom LightGBM eval metric: RMSLE (lower is better)."""
    log_diff = np.log1p(y_pred) - np.log1p(y_true)
    return 'RMSLE', np.sqrt(np.mean(log_diff ** 2)), False
print('Starting training with custom eval function...')
# Train again, this time scoring the eval set with the custom RMSLE metric.
# `early_stopping_rounds` was removed from fit() in LightGBM 4.0;
# use the early_stopping callback instead.
gbm.fit(X_train, y_train,
        eval_set=[(X_test, y_test)],
        eval_metric=rmsle,
        callbacks=[lgb.early_stopping(stopping_rounds=5)])
# another self-defined eval metric
# f(y_true: array, y_pred: array) -> name: string, eval_result: float, is_higher_better: bool
# Relative Absolute Error (RAE)
def rae(y_true, y_pred):
    """Custom LightGBM eval metric: RAE (lower is better)."""
    absolute_error = np.abs(y_pred - y_true).sum()
    baseline_error = np.abs(np.mean(y_true) - y_true).sum()
    return 'RAE', absolute_error / baseline_error, False
print('Starting training with multiple custom eval functions...')
# Train with both custom metrics evaluated every boosting round.
# `early_stopping_rounds` was removed from fit() in LightGBM 4.0;
# use the early_stopping callback instead.
gbm.fit(X_train, y_train,
        eval_set=[(X_test, y_test)],
        eval_metric=[rmsle, rae],
        callbacks=[lgb.early_stopping(stopping_rounds=5)])

print('Starting predicting...')
# Predict using the best iteration found by early stopping.
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration_)
# Evaluate: report both custom metrics on the held-out test set.
rmsle_test = rmsle(y_test, y_pred)[1]
rae_test = rae(y_test, y_pred)[1]
print(f'The RMSLE of prediction is: {rmsle_test}')
print(f'The RAE of prediction is: {rae_test}')
# other scikit-learn modules
# Exhaustively search the learning-rate / tree-count grid with
# 3-fold cross-validation and report the best combination found.
base_model = lgb.LGBMRegressor(num_leaves=31)
search_space = {
    'learning_rate': [0.01, 0.1, 1],
    'n_estimators': [20, 40],
}
gbm = GridSearchCV(base_model, search_space, cv=3)
gbm.fit(X_train, y_train)

print(f'Best parameters found by grid search are: {gbm.best_params_}')