Unverified commit 4ee0bc05 authored by James Lamb, committed by GitHub

[python-package] stop relying on string concatenation / splitting for cv() eval results (#6761)


Co-authored-by: Nikita Titov <nekit94-08@mail.ru>
parent ee0131c5
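
In short: _agg_cv_result() used to fold the dataset and metric names into one space-separated string under a fixed "cv_agg" dataset name, and every consumer had to split that string back apart. After this change, the result tuples carry the two names as separate fields. A minimal before/after sketch of the tuple shapes (the dataset/metric names and values are illustrative, not code from the repository):

    # before: names concatenated into one string, recovered via .split()
    old_item = ("cv_agg", "valid l2", 0.25, False, 0.01)
    dataset_name, metric_name = old_item[1].split(" ")

    # after: (dataset_name, metric_name, mean_value, is_higher_better, std_dev)
    new_item = ("valid", "l2", 0.25, False, 0.01)
    dataset_name, metric_name = new_item[0], new_item[1]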
@@ -71,6 +71,14 @@ class CallbackEnv:
     evaluation_result_list: Optional[_ListOfEvalResultTuples]
 
 
+def _is_using_cv(env: CallbackEnv) -> bool:
+    """Check if model in callback env is a CVBooster."""
+    # this import is here to avoid a circular import
+    from .engine import CVBooster
+
+    return isinstance(env.model, CVBooster)
+
+
 def _format_eval_result(value: _EvalResultTuple, show_stdv: bool) -> str:
     """Format metric string."""
     dataset_name, metric_name, metric_value, *_ = value
@@ -143,16 +151,13 @@ class _RecordEvaluationCallback:
             )
         self.eval_result.clear()
         for item in env.evaluation_result_list:
-            if len(item) == 4:  # regular train
-                data_name, eval_name = item[:2]
-            else:  # cv
-                data_name, eval_name = item[1].split()
-            self.eval_result.setdefault(data_name, OrderedDict())
+            dataset_name, metric_name, *_ = item
+            self.eval_result.setdefault(dataset_name, OrderedDict())
             if len(item) == 4:
-                self.eval_result[data_name].setdefault(eval_name, [])
+                self.eval_result[dataset_name].setdefault(metric_name, [])
             else:
-                self.eval_result[data_name].setdefault(f"{eval_name}-mean", [])
-                self.eval_result[data_name].setdefault(f"{eval_name}-stdv", [])
+                self.eval_result[dataset_name].setdefault(f"{metric_name}-mean", [])
+                self.eval_result[dataset_name].setdefault(f"{metric_name}-stdv", [])
 
     def __call__(self, env: CallbackEnv) -> None:
         if env.iteration == env.begin_iteration:
@@ -163,15 +168,16 @@ class _RecordEvaluationCallback:
                 "Please report it at https://github.com/microsoft/LightGBM/issues"
             )
         for item in env.evaluation_result_list:
+            # for cv(), 'metric_value' is actually a mean of metric values over all CV folds
+            dataset_name, metric_name, metric_value, *_ = item
             if len(item) == 4:
-                data_name, eval_name, result = item[:3]
-                self.eval_result[data_name][eval_name].append(result)
+                # train()
+                self.eval_result[dataset_name][metric_name].append(metric_value)
             else:
-                data_name, eval_name = item[1].split()
-                res_mean = item[2]
-                res_stdv = item[4]  # type: ignore[misc]
-                self.eval_result[data_name][f"{eval_name}-mean"].append(res_mean)
-                self.eval_result[data_name][f"{eval_name}-stdv"].append(res_stdv)
+                # cv()
+                metric_std_dev = item[4]  # type: ignore[misc]
+                self.eval_result[dataset_name][f"{metric_name}-mean"].append(metric_value)
+                self.eval_result[dataset_name][f"{metric_name}-stdv"].append(metric_std_dev)
 
 
 def record_evaluation(eval_result: Dict[str, Dict[str, List[Any]]]) -> Callable:
@@ -304,15 +310,15 @@ class _EarlyStoppingCallback:
     def _lt_delta(self, curr_score: float, best_score: float, delta: float) -> bool:
         return curr_score < best_score - delta
 
-    def _is_train_set(self, ds_name: str, eval_name: str, env: CallbackEnv) -> bool:
+    def _is_train_set(self, dataset_name: str, env: CallbackEnv) -> bool:
         """Check, by name, if a given Dataset is the training data."""
         # for lgb.cv() with eval_train_metric=True, evaluation is also done on the training set
         # and those metrics are considered for early stopping
-        if ds_name == "cv_agg" and eval_name == "train":
+        if _is_using_cv(env) and dataset_name == "train":
             return True
         # for lgb.train(), it's possible to pass the training data via valid_sets with any eval_name
-        if isinstance(env.model, Booster) and ds_name == env.model._train_data_name:
+        if isinstance(env.model, Booster) and dataset_name == env.model._train_data_name:
             return True
         return False
@@ -327,11 +333,13 @@ class _EarlyStoppingCallback:
             _log_warning("Early stopping is not available in dart mode")
             return
 
+        # get details of the first dataset
+        first_dataset_name, first_metric_name, *_ = env.evaluation_result_list[0]
+
         # validation sets are guaranteed to not be identical to the training data in cv()
         if isinstance(env.model, Booster):
             only_train_set = len(env.evaluation_result_list) == 1 and self._is_train_set(
-                ds_name=env.evaluation_result_list[0][0],
-                eval_name=env.evaluation_result_list[0][1].split(" ")[0],
+                dataset_name=first_dataset_name,
                 env=env,
             )
             if only_train_set:
@@ -370,8 +378,7 @@ class _EarlyStoppingCallback:
             _log_info(f"Using {self.min_delta} as min_delta for all metrics.")
             deltas = [self.min_delta] * n_datasets * n_metrics
 
-        # split is needed for "<dataset type> <metric>" case (e.g. "train l1")
-        self.first_metric = env.evaluation_result_list[0][1].split(" ")[-1]
+        self.first_metric = first_metric_name
         for eval_ret, delta in zip(env.evaluation_result_list, deltas):
             self.best_iter.append(0)
             if eval_ret[3]:  # greater is better
@@ -381,7 +388,7 @@ class _EarlyStoppingCallback:
                 self.best_score.append(float("inf"))
                 self.cmp_op.append(partial(self._lt_delta, delta=delta))
 
-    def _final_iteration_check(self, env: CallbackEnv, eval_name_splitted: List[str], i: int) -> None:
+    def _final_iteration_check(self, *, env: CallbackEnv, metric_name: str, i: int) -> None:
         if env.iteration == env.end_iteration - 1:
             if self.verbose:
                 best_score_str = "\t".join([_format_eval_result(x, show_stdv=True) for x in self.best_score_list[i]])
@@ -389,7 +396,7 @@ class _EarlyStoppingCallback:
                     "Did not meet early stopping. " f"Best iteration is:\n[{self.best_iter[i] + 1}]\t{best_score_str}"
                 )
                 if self.first_metric_only:
-                    _log_info(f"Evaluated only: {eval_name_splitted[-1]}")
+                    _log_info(f"Evaluated only: {metric_name}")
             raise EarlyStopException(self.best_iter[i], self.best_score_list[i])
 
     def __call__(self, env: CallbackEnv) -> None:
@@ -405,21 +412,18 @@ class _EarlyStoppingCallback:
         # self.best_score_list is initialized to an empty list
         first_time_updating_best_score_list = self.best_score_list == []
         for i in range(len(env.evaluation_result_list)):
-            score = env.evaluation_result_list[i][2]
-            if first_time_updating_best_score_list or self.cmp_op[i](score, self.best_score[i]):
-                self.best_score[i] = score
+            dataset_name, metric_name, metric_value, *_ = env.evaluation_result_list[i]
+            if first_time_updating_best_score_list or self.cmp_op[i](metric_value, self.best_score[i]):
+                self.best_score[i] = metric_value
                 self.best_iter[i] = env.iteration
                 if first_time_updating_best_score_list:
                     self.best_score_list.append(env.evaluation_result_list)
                 else:
                     self.best_score_list[i] = env.evaluation_result_list
-            # split is needed for "<dataset type> <metric>" case (e.g. "train l1")
-            eval_name_splitted = env.evaluation_result_list[i][1].split(" ")
-            if self.first_metric_only and self.first_metric != eval_name_splitted[-1]:
+            if self.first_metric_only and self.first_metric != metric_name:
                 continue  # use only the first metric for early stopping
             if self._is_train_set(
-                ds_name=env.evaluation_result_list[i][0],
-                eval_name=eval_name_splitted[0],
+                dataset_name=dataset_name,
                 env=env,
             ):
                 continue  # train data for lgb.cv or sklearn wrapper (underlying lgb.train)
@@ -430,9 +434,9 @@ class _EarlyStoppingCallback:
                     )
                     _log_info(f"Early stopping, best iteration is:\n[{self.best_iter[i] + 1}]\t{eval_result_str}")
                     if self.first_metric_only:
-                        _log_info(f"Evaluated only: {eval_name_splitted[-1]}")
+                        _log_info(f"Evaluated only: {metric_name}")
                 raise EarlyStopException(self.best_iter[i], self.best_score_list[i])
-            self._final_iteration_check(env, eval_name_splitted, i)
+            self._final_iteration_check(env=env, metric_name=metric_name, i=i)
 
 
 def _should_enable_early_stopping(stopping_rounds: Any) -> bool:
...
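
The net effect of the _RecordEvaluationCallback changes above: both code paths now unpack (dataset_name, metric_name, metric_value, ...) positionally instead of splitting a combined string. A rough sketch of the dictionaries the callback builds (dataset/metric names and values are hypothetical):

    # lgb.train() path: 4-element items, one series per metric
    eval_result = {"valid_0": {"l2": [0.25, 0.23]}}

    # lgb.cv() path: 5-element items, where item[2] is the mean over the CV folds
    # and item[4] the standard deviation, fanned out into "-mean"/"-stdv" series
    eval_result = {"valid": {"l2-mean": [0.25, 0.23], "l2-stdv": [0.01, 0.01]}}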
@@ -581,15 +581,31 @@ def _agg_cv_result(
     raw_results: List[List[_LGBM_BoosterEvalMethodResultType]],
 ) -> List[_LGBM_BoosterEvalMethodResultWithStandardDeviationType]:
     """Aggregate cross-validation results."""
-    cvmap: Dict[str, List[float]] = OrderedDict()
-    metric_type: Dict[str, bool] = {}
+    # build up 2 maps, of the form:
+    #
+    #   OrderedDict{
+    #       (<dataset_name>, <metric_name>): <is_higher_better>
+    #   }
+    #
+    #   OrderedDict{
+    #       (<dataset_name>, <metric_name>): list[<metric_value>]
+    #   }
+    #
+    metric_types: Dict[Tuple[str, str], bool] = OrderedDict()
+    metric_values: Dict[Tuple[str, str], List[float]] = OrderedDict()
     for one_result in raw_results:
-        for one_line in one_result:
-            key = f"{one_line[0]} {one_line[1]}"
-            metric_type[key] = one_line[3]
-            cvmap.setdefault(key, [])
-            cvmap[key].append(one_line[2])
-    return [("cv_agg", k, float(np.mean(v)), metric_type[k], float(np.std(v))) for k, v in cvmap.items()]
+        for dataset_name, metric_name, metric_value, is_higher_better in one_result:
+            key = (dataset_name, metric_name)
+            metric_types[key] = is_higher_better
+            metric_values.setdefault(key, [])
+            metric_values[key].append(metric_value)
+
+    # turn that into a list of tuples of the form:
+    #
+    #   [
+    #       (<dataset_name>, <metric_name>, mean(<values>), <is_higher_better>, std_dev(<values>))
+    #   ]
+    return [(k[0], k[1], float(np.mean(v)), metric_types[k], float(np.std(v))) for k, v in metric_values.items()]
 
 
 def cv(
@@ -812,9 +828,9 @@ def cv(
         )
         cvbooster.update(fobj=fobj)  # type: ignore[call-arg]
         res = _agg_cv_result(cvbooster.eval_valid(feval))  # type: ignore[call-arg]
-        for _, key, mean, _, std in res:
-            results[f"{key}-mean"].append(mean)
-            results[f"{key}-stdv"].append(std)
+        for dataset_name, metric_name, metric_mean, _, metric_std_dev in res:
+            results[f"{dataset_name} {metric_name}-mean"].append(metric_mean)
+            results[f"{dataset_name} {metric_name}-stdv"].append(metric_std_dev)
         try:
             for cb in callbacks_after_iter:
                 cb(
...
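
The public keys of the dictionary returned by cv() keep the "<dataset> <metric>-mean" / "<dataset> <metric>-stdv" form; they are just assembled from the separate tuple fields now. A minimal usage sketch (assumes a default regression setup, where the validation folds are named "valid" and the default metric is l2):

    import lightgbm as lgb
    import numpy as np

    rng = np.random.default_rng(42)
    train_set = lgb.Dataset(rng.random((100, 5)), label=rng.random(100))
    results = lgb.cv({"objective": "regression", "verbosity": -1}, train_set, num_boost_round=5, nfold=3)
    print(results["valid l2-mean"])  # one mean per iteration, averaged over folds
    print(results["valid l2-stdv"])  # matching per-iteration standard deviations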
@@ -64,6 +64,13 @@ def constant_metric(preds, train_data):
     return ("error", 0.0, False)
 
 
+def constant_metric_multi(preds, train_data):
+    return [
+        ("important_metric", 1.5, False),
+        ("irrelevant_metric", 7.8, False),
+    ]
+
+
 def decreasing_metric(preds, train_data):
     return ("decreasing_metric", next(decreasing_generator), False)
@@ -2570,6 +2577,13 @@ def test_metrics():
     assert "valid binary_logloss-mean" in res
     assert "valid error-mean" in res
 
+    # default metric in args with 1 custom function returning a list of 2 metrics
+    res = get_cv_result(metrics="binary_logloss", feval=constant_metric_multi)
+    assert len(res) == 6
+    assert "valid binary_logloss-mean" in res
+    assert res["valid important_metric-mean"] == [1.5, 1.5]
+    assert res["valid irrelevant_metric-mean"] == [7.8, 7.8]
+
     # non-default metric in args with custom one
     res = get_cv_result(metrics="binary_error", feval=constant_metric)
     assert len(res) == 4
@@ -2703,6 +2717,13 @@ def test_metrics():
     assert "binary_logloss" in evals_result["valid_0"]
     assert "error" in evals_result["valid_0"]
 
+    # default metric in params with custom function returning a list of 2 metrics
+    train_booster(params=params_obj_metric_log_verbose, feval=constant_metric_multi)
+    assert len(evals_result["valid_0"]) == 3
+    assert "binary_logloss" in evals_result["valid_0"]
+    assert evals_result["valid_0"]["important_metric"] == [1.5, 1.5]
+    assert evals_result["valid_0"]["irrelevant_metric"] == [7.8, 7.8]
+
     # non-default metric in params with custom one
     train_booster(params=params_obj_metric_err_verbose, feval=constant_metric)
     assert len(evals_result["valid_0"]) == 2
...
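
The new tests lean on the existing feval contract: a custom metric callable receives (preds, eval_data) and may return either a single (name, value, is_higher_better) tuple or a list of them, as constant_metric_multi does. A hedged sketch of using such a multi-metric feval with record_evaluation() (illustrative data and parameters, mirroring the tests above):

    import lightgbm as lgb
    import numpy as np

    def constant_metric_multi(preds, train_data):
        # a single feval can report several metrics at once
        return [
            ("important_metric", 1.5, False),
            ("irrelevant_metric", 7.8, False),
        ]

    rng = np.random.default_rng(42)
    train_set = lgb.Dataset(rng.random((100, 5)), label=rng.integers(0, 2, 100))
    valid_set = lgb.Dataset(rng.random((50, 5)), label=rng.integers(0, 2, 50), reference=train_set)
    evals_result = {}
    lgb.train(
        {"objective": "binary", "metric": "binary_logloss", "verbosity": -1},
        train_set,
        num_boost_round=2,
        valid_sets=[valid_set],
        feval=constant_metric_multi,
        callbacks=[lgb.record_evaluation(evals_result)],
    )
    # evals_result["valid_0"] now holds "binary_logloss", "important_metric", "irrelevant_metric"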