Unverified Commit 47208894 authored by sayantan sadhu's avatar sayantan sadhu Committed by GitHub
Browse files

[python] use f-strings for concatenation in examples/python-guide/advanced_example.py (#4386)



* Improved the syntax of the f-strings

* Converted the strings to f-strings

* Reverted the whitespace change.

* Update examples/python-guide/advanced_example.py
Co-authored-by: default avatarNikita Titov <nekit94-08@mail.ru>
Co-authored-by: default avatarNikita Titov <nekit94-08@mail.ru>
parent bd21efed
...@@ -43,7 +43,7 @@ params = { ...@@ -43,7 +43,7 @@ params = {
} }
# generate feature names # generate feature names
feature_name = ['feature_' + str(col) for col in range(num_feature)] feature_name = [f'feature_{col}' for col in range(num_feature)]
print('Starting training...') print('Starting training...')
# feature_name and categorical_feature # feature_name and categorical_feature
...@@ -56,7 +56,7 @@ gbm = lgb.train(params, ...@@ -56,7 +56,7 @@ gbm = lgb.train(params,
print('Finished first 10 rounds...') print('Finished first 10 rounds...')
# check feature name # check feature name
print('7th feature name is:', lgb_train.feature_name[6]) print(f'7th feature name is: {lgb_train.feature_name[6]}')
print('Saving model...') print('Saving model...')
# save model to file # save model to file
...@@ -70,10 +70,10 @@ with open('model.json', 'w+') as f: ...@@ -70,10 +70,10 @@ with open('model.json', 'w+') as f:
json.dump(model_json, f, indent=4) json.dump(model_json, f, indent=4)
# feature names # feature names
print('Feature names:', gbm.feature_name()) print(f'Feature names: {gbm.feature_name()}')
# feature importances # feature importances
print('Feature importances:', list(gbm.feature_importance())) print(f'Feature importances: {list(gbm.feature_importance())}')
print('Loading model to predict...') print('Loading model to predict...')
# load model to predict # load model to predict
...@@ -81,7 +81,8 @@ bst = lgb.Booster(model_file='model.txt') ...@@ -81,7 +81,8 @@ bst = lgb.Booster(model_file='model.txt')
# can only predict with the best iteration (or the saving iteration) # can only predict with the best iteration (or the saving iteration)
y_pred = bst.predict(X_test) y_pred = bst.predict(X_test)
# eval with loaded model # eval with loaded model
print("The rmse of loaded model's prediction is:", mean_squared_error(y_test, y_pred) ** 0.5) rmse_loaded_model = mean_squared_error(y_test, y_pred) ** 0.5
print(f"The RMSE of loaded model's prediction is: {rmse_loaded_model}")
print('Dumping and loading model with pickle...') print('Dumping and loading model with pickle...')
# dump model with pickle # dump model with pickle
...@@ -93,7 +94,8 @@ with open('model.pkl', 'rb') as fin: ...@@ -93,7 +94,8 @@ with open('model.pkl', 'rb') as fin:
# can predict with any iteration when loaded in pickle way # can predict with any iteration when loaded in pickle way
y_pred = pkl_bst.predict(X_test, num_iteration=7) y_pred = pkl_bst.predict(X_test, num_iteration=7)
# eval with loaded model # eval with loaded model
print("The RMSE of pickled model's prediction is:", mean_squared_error(y_test, y_pred) ** 0.5) rmse_pickled_model = mean_squared_error(y_test, y_pred) ** 0.5
print(f"The RMSE of pickled model's prediction is: {rmse_pickled_model}")
# continue training # continue training
# init_model accepts: # init_model accepts:
...@@ -187,8 +189,7 @@ gbm = lgb.train(params, ...@@ -187,8 +189,7 @@ gbm = lgb.train(params,
feval=[binary_error, accuracy], feval=[binary_error, accuracy],
valid_sets=lgb_eval) valid_sets=lgb_eval)
print('Finished 50 - 60 rounds with self-defined objective function ' print('Finished 50 - 60 rounds with self-defined objective function and multiple self-defined eval metrics...')
'and multiple self-defined eval metrics...')
print('Starting a new training job...') print('Starting a new training job...')
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment