Unverified commit c8482cc0, authored by NovusEdge, committed by GitHub
Browse files

[python] added f-string to python-package/lightgbm/callback.py (#4142)



* added f-string

* fixed indent issues

* fixed some linting issues

* trying to fix E225 for 233:65

* Update python-package/lightgbm/callback.py

* first_metric_only log message

* Apply suggestions from code review

Co-authored-by: Nikita Titov <nekit94-08@mail.ru>
Co-authored-by: James Lamb <jaylamb20@gmail.com>
Co-authored-by: Nikita Titov <nekit94-08@mail.ru>
parent 676c95fb
@@ -39,12 +39,12 @@ CallbackEnv = collections.namedtuple(
 def _format_eval_result(value: list, show_stdv: bool = True) -> str:
     """Format metric string."""
     if len(value) == 4:
-        return '%s\'s %s: %g' % (value[0], value[1], value[2])
+        return f"{value[0]}'s {value[1]}: {value[2]:g}"
     elif len(value) == 5:
         if show_stdv:
-            return '%s\'s %s: %g + %g' % (value[0], value[1], value[2], value[4])
+            return f"{value[0]}'s {value[1]}: {value[2]:g} + {value[4]:g}"
         else:
-            return '%s\'s %s: %g' % (value[0], value[1], value[2])
+            return f"{value[0]}'s {value[1]}: {value[2]:g}"
     else:
         raise ValueError("Wrong metric value")
@@ -67,7 +67,7 @@ def print_evaluation(period: int = 1, show_stdv: bool = True) -> Callable:
     def _callback(env: CallbackEnv) -> None:
         if period > 0 and env.evaluation_result_list and (env.iteration + 1) % period == 0:
             result = '\t'.join([_format_eval_result(x, show_stdv) for x in env.evaluation_result_list])
-            _log_info('[%d]\t%s' % (env.iteration + 1, result))
+            _log_info(f'[{env.iteration + 1}]\t{result}')
     _callback.order = 10  # type: ignore
     return _callback
@@ -129,8 +129,7 @@ def reset_parameter(**kwargs: Union[list, Callable]) -> Callable:
         for key, value in kwargs.items():
             if isinstance(value, list):
                 if len(value) != env.end_iteration - env.begin_iteration:
-                    raise ValueError("Length of list {} has to equal to 'num_boost_round'."
-                                     .format(repr(key)))
+                    raise ValueError(f"Length of list {repr(key)} has to equal to 'num_boost_round'.")
                 new_param = value[env.iteration - env.begin_iteration]
             else:
                 new_param = value(env.iteration - env.begin_iteration)
@@ -187,7 +186,7 @@ def early_stopping(stopping_rounds: int, first_metric_only: bool = False, verbose: bool = True) -> Callable:
                 'at least one dataset and eval metric is required for evaluation')
         if verbose:
-            _log_info("Training until validation scores don't improve for {} rounds".format(stopping_rounds))
+            _log_info(f"Training until validation scores don't improve for {stopping_rounds} rounds")
         # split is needed for "<dataset type> <metric>" case (e.g. "train l1")
         first_metric[0] = env.evaluation_result_list[0][1].split(" ")[-1]
@@ -204,10 +203,11 @@ def early_stopping(stopping_rounds: int, first_metric_only: bool = False, verbose: bool = True) -> Callable:
     def _final_iteration_check(env: CallbackEnv, eval_name_splitted: List[str], i: int) -> None:
         if env.iteration == env.end_iteration - 1:
             if verbose:
-                _log_info('Did not meet early stopping. Best iteration is:\n[%d]\t%s' % (
-                    best_iter[i] + 1, '\t'.join([_format_eval_result(x) for x in best_score_list[i]])))
+                best_score_str = '\t'.join([_format_eval_result(x) for x in best_score_list[i]])
+                _log_info('Did not meet early stopping. '
+                          f'Best iteration is:\n[{best_iter[i] + 1}]\t{best_score_str}')
                 if first_metric_only:
-                    _log_info("Evaluated only: {}".format(eval_name_splitted[-1]))
+                    _log_info(f"Evaluated only: {eval_name_splitted[-1]}")
             raise EarlyStopException(best_iter[i], best_score_list[i])

     def _callback(env: CallbackEnv) -> None:
@@ -231,10 +231,10 @@ def early_stopping(stopping_rounds: int, first_metric_only: bool = False, verbose: bool = True) -> Callable:
                 continue  # train data for lgb.cv or sklearn wrapper (underlying lgb.train)
             elif env.iteration - best_iter[i] >= stopping_rounds:
                 if verbose:
-                    _log_info('Early stopping, best iteration is:\n[%d]\t%s' % (
-                        best_iter[i] + 1, '\t'.join([_format_eval_result(x) for x in best_score_list[i]])))
+                    eval_result_str = '\t'.join([_format_eval_result(x) for x in best_score_list[i]])
+                    _log_info(f"Early stopping, best iteration is:\n[{best_iter[i] + 1}]\t{eval_result_str}")
                     if first_metric_only:
-                        _log_info("Evaluated only: {}".format(eval_name_splitted[-1]))
+                        _log_info(f"Evaluated only: {eval_name_splitted[-1]}")
                 raise EarlyStopException(best_iter[i], best_score_list[i])
             _final_iteration_check(env, eval_name_splitted, i)
     _callback.order = 30  # type: ignore
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment