Unverified Commit b81f7dd6 authored by Nikita Titov, committed by GitHub
Browse files

[python] remove `learning_rates` argument of `train()` function (#4831)

parent 2f5d8985
...@@ -112,15 +112,15 @@ gbm = lgb.train(params, ...@@ -112,15 +112,15 @@ gbm = lgb.train(params,
print('Finished 10 - 20 rounds with model file...') print('Finished 10 - 20 rounds with model file...')
# decay learning rates # decay learning rates
# learning_rates accepts: # reset_parameter callback accepts:
# 1. list/tuple with length = num_boost_round # 1. list with length = num_boost_round
# 2. function(curr_iter) # 2. function(curr_iter)
gbm = lgb.train(params, gbm = lgb.train(params,
lgb_train, lgb_train,
num_boost_round=10, num_boost_round=10,
init_model=gbm, init_model=gbm,
learning_rates=lambda iter: 0.05 * (0.99 ** iter), valid_sets=lgb_eval,
valid_sets=lgb_eval) callbacks=[lgb.reset_parameter(learning_rate=lambda iter: 0.05 * (0.99 ** iter))])
print('Finished 20 - 30 rounds with decay learning rates...') print('Finished 20 - 30 rounds with decay learning rates...')
......
...@@ -36,7 +36,6 @@ def train( ...@@ -36,7 +36,6 @@ def train(
early_stopping_rounds: Optional[int] = None, early_stopping_rounds: Optional[int] = None,
evals_result: Optional[Dict[str, Any]] = None, evals_result: Optional[Dict[str, Any]] = None,
verbose_eval: Union[bool, int, str] = 'warn', verbose_eval: Union[bool, int, str] = 'warn',
learning_rates: Optional[Union[List[float], Callable[[int], float]]] = None,
keep_training_booster: bool = False, keep_training_booster: bool = False,
callbacks: Optional[List[Callable]] = None callbacks: Optional[List[Callable]] = None
) -> Booster: ) -> Booster:
...@@ -145,10 +144,6 @@ def train( ...@@ -145,10 +144,6 @@ def train(
With ``verbose_eval`` = 4 and at least one item in ``valid_sets``, With ``verbose_eval`` = 4 and at least one item in ``valid_sets``,
an evaluation metric is printed every 4 (instead of 1) boosting stages. an evaluation metric is printed every 4 (instead of 1) boosting stages.
learning_rates : list, callable or None, optional (default=None)
List of learning rates for each boosting round
or a callable that calculates ``learning_rate``
in terms of current number of round (e.g. yields learning rate decay).
keep_training_booster : bool, optional (default=False) keep_training_booster : bool, optional (default=False)
Whether the returned Booster will be used to keep training. Whether the returned Booster will be used to keep training.
If False, the returned value will be converted into _InnerPredictor before returning. If False, the returned value will be converted into _InnerPredictor before returning.
...@@ -251,11 +246,6 @@ def train( ...@@ -251,11 +246,6 @@ def train(
if early_stopping_rounds is not None and early_stopping_rounds > 0: if early_stopping_rounds is not None and early_stopping_rounds > 0:
callbacks.add(callback.early_stopping(early_stopping_rounds, first_metric_only, verbose=bool(verbose_eval))) callbacks.add(callback.early_stopping(early_stopping_rounds, first_metric_only, verbose=bool(verbose_eval)))
if learning_rates is not None:
_log_warning("'learning_rates' argument is deprecated and will be removed in a future release of LightGBM. "
"Pass 'reset_parameter()' callback via 'callbacks' argument instead.")
callbacks.add(callback.reset_parameter(learning_rate=learning_rates))
if evals_result is not None: if evals_result is not None:
_log_warning("'evals_result' argument is deprecated and will be removed in a future release of LightGBM. " _log_warning("'evals_result' argument is deprecated and will be removed in a future release of LightGBM. "
"Pass 'record_evaluation()' callback via 'callbacks' argument instead.") "Pass 'record_evaluation()' callback via 'callbacks' argument instead.")
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment