# coding: utf-8
from pathlib import Path

import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import GridSearchCV

import lightgbm as lgb
print("Loading data...")
wxchan's avatar
wxchan committed
12
# load or create your dataset
13
14
15
regression_example_dir = Path(__file__).absolute().parents[1] / "regression"
df_train = pd.read_csv(str(regression_example_dir / "regression.train"), header=None, sep="\t")
df_test = pd.read_csv(str(regression_example_dir / "regression.test"), header=None, sep="\t")
wxchan's avatar
wxchan committed
16

17
18
19
20
y_train = df_train[0]
y_test = df_test[0]
X_train = df_train.drop(0, axis=1)
X_test = df_test.drop(0, axis=1)
print("Starting training...")

# train a small LightGBM regressor, stopping early when the L1 metric
# on the held-out evaluation set fails to improve for 5 rounds
gbm = lgb.LGBMRegressor(num_leaves=31, learning_rate=0.05, n_estimators=20)
gbm.fit(
    X_train,
    y_train,
    eval_set=[(X_test, y_test)],
    eval_metric="l1",
    callbacks=[lgb.early_stopping(5)],
)

print("Starting predicting...")
wxchan's avatar
wxchan committed
28
# predict
29
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration_)
wxchan's avatar
wxchan committed
30
# eval
31
rmse_test = mean_squared_error(y_test, y_pred) ** 0.5
32
print(f"The RMSE of prediction is: {rmse_test}")
33
34

# feature importances
35
print(f"Feature importances: {list(gbm.feature_importances_)}")
36

37
38

# self-defined eval metric
# f(y_true: array, y_pred: array) -> name: str, eval_result: float, is_higher_better: bool
# Root Mean Squared Logarithmic Error (RMSLE)
def rmsle(y_true, y_pred):
    """Return ("RMSLE", value, False) — lower is better for this metric."""
    log_diff = np.log1p(y_pred) - np.log1p(y_true)
    return "RMSLE", np.sqrt(np.mean(np.square(log_diff))), False
print("Starting training with custom eval function...")

# train again, this time scoring the eval set with the custom RMSLE metric
gbm.fit(
    X_train,
    y_train,
    eval_set=[(X_test, y_test)],
    eval_metric=rmsle,
    callbacks=[lgb.early_stopping(5)],
)
# another self-defined eval metric
# f(y_true: array, y_pred: array) -> name: str, eval_result: float, is_higher_better: bool
# Relative Absolute Error (RAE)
def rae(y_true, y_pred):
    """Return ("RAE", value, False) — total absolute error relative to a mean predictor."""
    abs_error = np.abs(y_pred - y_true)
    abs_deviation = np.abs(np.mean(y_true) - y_true)
    return "RAE", abs_error.sum() / abs_deviation.sum(), False
print("Starting training with multiple custom eval functions...")

# train once more, reporting both custom metrics on the eval set
gbm.fit(
    X_train,
    y_train,
    eval_set=[(X_test, y_test)],
    eval_metric=[rmsle, rae],
    callbacks=[lgb.early_stopping(5)],
)
print("Starting predicting...")

# predict with the best iteration found during the last fit
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration_)

# eval: report both custom metrics on the held-out set
_, rmsle_test, _ = rmsle(y_test, y_pred)
_, rae_test, _ = rae(y_test, y_pred)
print(f"The RMSLE of prediction is: {rmsle_test}")
print(f"The RAE of prediction is: {rae_test}")
# other scikit-learn modules
# exhaustive 3-fold cross-validated search over a small hyper-parameter grid
estimator = lgb.LGBMRegressor(num_leaves=31)

param_grid = dict(
    learning_rate=[0.01, 0.1, 1],
    n_estimators=[20, 40],
)

gbm = GridSearchCV(estimator, param_grid, cv=3)
gbm.fit(X_train, y_train)

print(f"Best parameters found by grid search are: {gbm.best_params_}")