# coding: utf-8
"""Example of LightGBM's scikit-learn API on the bundled regression dataset."""
from pathlib import Path

import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import GridSearchCV

import lightgbm as lgb

print('Loading data...')
# load or create your dataset
# data files live in the sibling 'regression' examples directory
regression_example_dir = Path(__file__).absolute().parents[1] / 'regression'
df_train = pd.read_csv(str(regression_example_dir / 'regression.train'), header=None, sep='\t')
df_test = pd.read_csv(str(regression_example_dir / 'regression.test'), header=None, sep='\t')

# column 0 is the target; the remaining columns are the features
y_train = df_train[0]
y_test = df_test[0]
X_train = df_train.drop(0, axis=1)
X_test = df_test.drop(0, axis=1)
print('Starting training...')
# train a small model; early_stopping_rounds stops when the l1 metric on the
# held-out eval_set has not improved for 5 consecutive rounds
gbm = lgb.LGBMRegressor(num_leaves=31,
                        learning_rate=0.05,
                        n_estimators=20)
gbm.fit(X_train, y_train,
        eval_set=[(X_test, y_test)],
        eval_metric='l1',
        early_stopping_rounds=5)
print('Starting predicting...')
# predict using the best iteration found by early stopping
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration_)
# eval: RMSE is the square root of sklearn's mean squared error
rmse_test = mean_squared_error(y_test, y_pred) ** 0.5
print(f'The RMSE of prediction is: {rmse_test}')

# feature importances
print(f'Feature importances: {list(gbm.feature_importances_)}')
# self-defined eval metric
# f(y_true: array, y_pred: array) -> name: str, eval_result: float, is_higher_better: bool
# Root Mean Squared Logarithmic Error (RMSLE)
def rmsle(y_true, y_pred):
    """Return ('RMSLE', value, False) in LightGBM's custom eval-metric format.

    Lower is better, hence is_higher_better=False.
    NOTE(review): np.log1p is NaN for inputs < -1; assumes targets and
    predictions stay above -1 (non-negative in this example) — confirm
    before reusing on other datasets.
    """
    return 'RMSLE', np.sqrt(np.mean(np.power(np.log1p(y_pred) - np.log1p(y_true), 2))), False
print('Starting training with custom eval function...')
# train: pass the metric function itself instead of a built-in metric name;
# early stopping now monitors RMSLE on the eval_set
gbm.fit(X_train, y_train,
        eval_set=[(X_test, y_test)],
        eval_metric=rmsle,
        early_stopping_rounds=5)
# another self-defined eval metric
# f(y_true: array, y_pred: array) -> name: str, eval_result: float, is_higher_better: bool
# Relative Absolute Error (RAE)
def rae(y_true, y_pred):
    """Return ('RAE', value, False) in LightGBM's custom eval-metric format.

    RAE is the total absolute error of the predictions divided by the total
    absolute error of the mean-of-targets baseline; lower is better.
    NOTE(review): denominator is 0 when all targets are identical — confirm
    the data has target variance before reusing elsewhere.
    """
    return 'RAE', np.sum(np.abs(y_pred - y_true)) / np.sum(np.abs(np.mean(y_true) - y_true)), False
print('Starting training with multiple custom eval functions...')
# train: a list of metric functions evaluates all of them each round
gbm.fit(X_train, y_train,
        eval_set=[(X_test, y_test)],
        eval_metric=[rmsle, rae],
        early_stopping_rounds=5)
print('Starting predicting...')
# predict using the best iteration found by early stopping
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration_)
# eval: each metric function returns (name, value, is_higher_better); take the value
rmsle_test = rmsle(y_test, y_pred)[1]
rae_test = rae(y_test, y_pred)[1]
print(f'The RMSLE of prediction is: {rmsle_test}')
print(f'The RAE of prediction is: {rae_test}')
# other scikit-learn modules: LGBMRegressor plugs into GridSearchCV like any
# sklearn estimator
estimator = lgb.LGBMRegressor(num_leaves=31)

param_grid = {
    'learning_rate': [0.01, 0.1, 1],
    'n_estimators': [20, 40]
}

# 3-fold cross-validated search over the 6 parameter combinations above
gbm = GridSearchCV(estimator, param_grid, cv=3)
gbm.fit(X_train, y_train)

print(f'Best parameters found by grid search are: {gbm.best_params_}')