"git@developer.sourcefind.cn:tianlh/lightgbm-dcu.git" did not exist on "2caf945f9dc47edf445ea1e325ce59d04738f370"
Unverified Commit e4c0ca5f authored by Nikita Titov, committed by GitHub

[python] remove `evals_result` argument of `train()` function (#4882)

parent 34b74842
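For callers migrating off the removed argument, the replacement is the record_evaluation() callback, which the hunks below already apply to the bundled examples and tests. A minimal, self-contained sketch of the new calling convention (the synthetic dataset and the params values are illustrative, not taken from this commit):

import lightgbm as lgb
import numpy as np

# Illustrative data; any regression dataset works the same way.
X, y = np.random.rand(500, 10), np.random.rand(500)
lgb_train = lgb.Dataset(X[:400], y[:400])
lgb_test = lgb.Dataset(X[400:], y[400:], reference=lgb_train)
params = {'objective': 'regression', 'metric': 'l1', 'verbose': -1}

evals_result = {}  # filled in place by the callback during training
gbm = lgb.train(
    params,
    lgb_train,
    num_boost_round=30,
    valid_sets=[lgb_train, lgb_test],
    callbacks=[
        lgb.log_evaluation(10),              # periodic metric logging
        lgb.record_evaluation(evals_result)  # replaces evals_result=evals_result
    ]
)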
@@ -148,8 +148,10 @@
     "    valid_sets=[lgb_train, lgb_test],\n",
     "    feature_name=[f'f{i + 1}' for i in range(X_train.shape[-1])],\n",
     "    categorical_feature=[21],\n",
-    "    evals_result=evals_result,\n",
-    "    callbacks=[lgb.log_evaluation(10)])"
+    "    callbacks=[\n",
+    "        lgb.log_evaluation(10),\n",
+    "        lgb.record_evaluation(evals_result)\n",
+    "    ])"
    ]
   },
   {
...
@@ -36,14 +36,18 @@ evals_result = {}  # to record eval results for plotting

 print('Starting training...')
 # train
-gbm = lgb.train(params,
-                lgb_train,
-                num_boost_round=100,
-                valid_sets=[lgb_train, lgb_test],
-                feature_name=[f'f{i + 1}' for i in range(X_train.shape[-1])],
-                categorical_feature=[21],
-                evals_result=evals_result,
-                callbacks=[lgb.log_evaluation(10)])
+gbm = lgb.train(
+    params,
+    lgb_train,
+    num_boost_round=100,
+    valid_sets=[lgb_train, lgb_test],
+    feature_name=[f'f{i + 1}' for i in range(X_train.shape[-1])],
+    categorical_feature=[21],
+    callbacks=[
+        lgb.log_evaluation(10),
+        lgb.record_evaluation(evals_result)
+    ]
+)

 print('Plotting metrics recorded during training...')
 ax = lgb.plot_metric(evals_result, metric='l1')
...
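For reference, the dictionary that record_evaluation() fills has the same shape the docstring removed below used to document: validation set name, then metric name, then one value per boosting round. A hedged sketch of what the training call above produces (values illustrative; without valid_names, LightGBM labels the sets 'training' and 'valid_1'):

# Illustrative shape of evals_result after the call above;
# plot_metric() reads this nested mapping.
# evals_result == {
#     'training': {'l1': [0.217, 0.196, ...]},
#     'valid_1':  {'l1': [0.223, 0.205, ...]},
# }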
@@ -34,7 +34,6 @@ def train(
     feature_name: Union[List[str], str] = 'auto',
     categorical_feature: Union[List[str], List[int], str] = 'auto',
     early_stopping_rounds: Optional[int] = None,
-    evals_result: Optional[Dict[str, Any]] = None,
     keep_training_booster: bool = False,
     callbacks: Optional[List[Callable]] = None
 ) -> Booster:
@@ -119,19 +118,6 @@ def train(
         To check only the first metric, set the ``first_metric_only`` parameter to ``True`` in ``params``.
         The index of iteration that has the best performance will be saved in the ``best_iteration`` field
         if early stopping logic is enabled by setting ``early_stopping_rounds``.
-    evals_result : dict or None, optional (default=None)
-        Dictionary used to store all evaluation results of all the items in ``valid_sets``.
-        This should be initialized outside of your call to ``train()`` and should be empty.
-        Any initial contents of the dictionary will be deleted.
-
-        .. rubric:: Example
-
-        With a ``valid_sets`` = [valid_set, train_set],
-        ``valid_names`` = ['eval', 'train']
-        and a ``params`` = {'metric': 'logloss'}
-        returns {'train': {'logloss': ['0.48253', '0.35953', ...]},
-        'eval': {'logloss': ['0.480385', '0.357756', ...]}}.
-
     keep_training_booster : bool, optional (default=False)
         Whether the returned Booster will be used to keep training.
         If False, the returned value will be converted into _InnerPredictor before returning.
@@ -221,11 +207,6 @@ def train(
     if early_stopping_rounds is not None and early_stopping_rounds > 0:
         callbacks_set.add(callback.early_stopping(early_stopping_rounds, first_metric_only))

-    if evals_result is not None:
-        _log_warning("'evals_result' argument is deprecated and will be removed in a future release of LightGBM. "
-                     "Pass 'record_evaluation()' callback via 'callbacks' argument instead.")
-        callbacks_set.add(callback.record_evaluation(evals_result))
-
     callbacks_before_iter_set = {cb for cb in callbacks_set if getattr(cb, 'before_iteration', False)}
     callbacks_after_iter_set = callbacks_set - callbacks_before_iter_set
     callbacks_before_iter = sorted(callbacks_before_iter_set, key=attrgetter('order'))
...
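The dispatch code in the last hunk above is what makes plain callables sufficient here: the engine partitions callbacks by a before_iteration attribute and sorts each group by an order attribute. A hedged sketch of a user-defined callback that cooperates with that logic, reusing params and lgb_train from the sketch near the top (print_progress is hypothetical; the attribute values are illustrative):

def print_progress(period=10):
    def _callback(env):
        # env is a lightgbm.callback.CallbackEnv namedtuple
        if (env.iteration + 1) % period == 0:
            print(f'finished iteration {env.iteration + 1} of {env.end_iteration}')
    _callback.before_iteration = False  # run after each boosting round, like record_evaluation()
    _callback.order = 20                # sort key relative to other callbacks
    return _callback

gbm = lgb.train(params, lgb_train, num_boost_round=30,
                callbacks=[print_progress(5)])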
@@ -198,7 +198,7 @@ def test_plot_metrics(params, breast_cancer_split, train_data):
                      valid_sets=[train_data, test_data],
                      valid_names=['v1', 'v2'],
                      num_boost_round=10,
-                     evals_result=evals_result0)
+                     callbacks=[lgb.record_evaluation(evals_result0)])
     with pytest.warns(UserWarning, match="More than one metric available, picking one to plot."):
         ax0 = lgb.plot_metric(evals_result0)
     assert isinstance(ax0, matplotlib.axes.Axes)
@@ -258,7 +258,7 @@ def test_plot_metrics(params, breast_cancer_split, train_data):

     evals_result1 = {}
     lgb.train(params, train_data,
               num_boost_round=10,
-              evals_result=evals_result1)
+              callbacks=[lgb.record_evaluation(evals_result1)])
     with pytest.raises(ValueError, match="eval results cannot be empty."):
         lgb.plot_metric(evals_result1)
...
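A hedged sketch of a direct check on the new convention, mirroring the two updated tests above: with a validation set the callback fills the dict, and without one it stays empty, which is why plot_metric() raises. The dataset and params here are illustrative, not the test fixtures:

import numpy as np
import lightgbm as lgb

X, y = np.random.rand(100, 5), np.random.rand(100)
train_data = lgb.Dataset(X, y)
params = {'objective': 'regression', 'metric': 'l2', 'verbose': -1}

evals_result = {}
lgb.train(params, train_data, num_boost_round=10,
          valid_sets=[train_data], valid_names=['train'],
          callbacks=[lgb.record_evaluation(evals_result)])
assert list(evals_result) == ['train']
assert len(evals_result['train']['l2']) == 10  # one value per boosting round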