Unverified Commit 6f0bc481 authored by James Lamb, committed by GitHub
Browse files

[dask] fix mypy errors about padded eval_results (#5716)

parent 7fd708df
...@@ -160,12 +160,11 @@ def _pad_eval_names(lgbm_model: LGBMModel, required_names: List[str]) -> LGBMMod ...@@ -160,12 +160,11 @@ def _pad_eval_names(lgbm_model: LGBMModel, required_names: List[str]) -> LGBMMod
Allows users to rely on expected eval_set names being present when fitting DaskLGBM estimators with ``eval_set``. Allows users to rely on expected eval_set names being present when fitting DaskLGBM estimators with ``eval_set``.
""" """
not_evaluated = 'not evaluated'
for eval_name in required_names: for eval_name in required_names:
if eval_name not in lgbm_model.evals_result_: if eval_name not in lgbm_model.evals_result_:
lgbm_model.evals_result_[eval_name] = not_evaluated lgbm_model.evals_result_[eval_name] = {}
if eval_name not in lgbm_model.best_score_: if eval_name not in lgbm_model.best_score_:
lgbm_model.best_score_[eval_name] = not_evaluated lgbm_model.best_score_[eval_name] = {}
return lgbm_model return lgbm_model
...@@ -444,7 +443,7 @@ def _train( ...@@ -444,7 +443,7 @@ def _train(
List of (X, y) tuple pairs to use as validation sets. List of (X, y) tuple pairs to use as validation sets.
Note, that not all workers may receive chunks of every eval set within ``eval_set``. When the returned Note, that not all workers may receive chunks of every eval set within ``eval_set``. When the returned
lightgbm estimator is not trained using any chunks of a particular eval set, its corresponding component lightgbm estimator is not trained using any chunks of a particular eval set, its corresponding component
of evals_result_ and best_score_ will be 'not_evaluated'. of ``evals_result_`` and ``best_score_`` will be empty dictionaries.
eval_names : list of str, or None, optional (default=None) eval_names : list of str, or None, optional (default=None)
Names of eval_set. Names of eval_set.
eval_sample_weight : list of Dask Array or Dask Series, or None, optional (default=None) eval_sample_weight : list of Dask Array or Dask Series, or None, optional (default=None)
......
...@@ -1127,7 +1127,7 @@ def test_eval_set_no_early_stopping(task, output, eval_sizes, eval_names_prefix, ...@@ -1127,7 +1127,7 @@ def test_eval_set_no_early_stopping(task, output, eval_sizes, eval_names_prefix,
# check that each eval_name and metric exists for all eval sets, allowing for the # check that each eval_name and metric exists for all eval sets, allowing for the
# case when a worker receives a fully-padded eval_set component which is not evaluated. # case when a worker receives a fully-padded eval_set component which is not evaluated.
if evals_result[eval_name] != 'not evaluated': if evals_result[eval_name] != {}:
for metric in eval_metric_names: for metric in eval_metric_names:
assert metric in evals_result[eval_name] assert metric in evals_result[eval_name]
assert metric in best_scores[eval_name] assert metric in best_scores[eval_name]
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment