Unverified Commit 3b6ebd79 authored by Michael Mahoney's avatar Michael Mahoney Committed by GitHub
Browse files

Add 'nrounds' as an alias for 'num_iterations' (fixes #4743) (#4746)

* Add 'nrounds' as an alias for 'num_iterations'

* Improve tests

* Compare against nrounds directly

* Fix whitespace lints
parent 99e0a4bd
......@@ -113,6 +113,7 @@
, "num_trees"
, "num_round"
, "num_rounds"
, "nrounds"
, "num_boost_round"
, "n_estimators"
, "max_iter"
......
......@@ -188,6 +188,68 @@ test_that("lightgbm() rejects negative or 0 value passed to nrounds", {
}
})
test_that("lightgbm() accepts nrounds as either a top-level argument or parameter", {
  # Train three boosters that should all be identical:
  #   1. nrounds passed as a top-level argument
  #   2. nrounds passed inside 'params' (as an alias of num_iterations)
  #   3. both supplied, where the value inside 'params' should take precedence
  expected_rounds <- 15L

  set.seed(708L)
  booster_from_arg <- lightgbm(
    data = train$data
    , label = train$label
    , nrounds = expected_rounds
    , params = list(
      objective = "regression"
      , metric = "l2"
      , num_leaves = 5L
    )
    , save_name = tempfile(fileext = ".model")
  )

  set.seed(708L)
  booster_from_params <- lightgbm(
    data = train$data
    , label = train$label
    , params = list(
      objective = "regression"
      , metric = "l2"
      , num_leaves = 5L
      , nrounds = expected_rounds
    )
    , save_name = tempfile(fileext = ".model")
  )

  set.seed(708L)
  booster_from_both <- lightgbm(
    data = train$data
    , label = train$label
    , nrounds = 20L
    , params = list(
      objective = "regression"
      , metric = "l2"
      , num_leaves = 5L
      , nrounds = expected_rounds
    )
    , save_name = tempfile(fileext = ".model")
  )

  l2_from_arg <- booster_from_arg$eval_train()[[1L]][["value"]]
  l2_from_params <- booster_from_params$eval_train()[[1L]][["value"]]
  l2_from_both <- booster_from_both$eval_train()[[1L]][["value"]]

  # check type just to be sure the subsetting didn't return a NULL
  expect_true(is.numeric(l2_from_arg))
  expect_true(is.numeric(l2_from_params))
  expect_true(is.numeric(l2_from_both))

  # identical training error implies the same model was fit each time
  expect_identical(l2_from_arg, l2_from_params)
  expect_identical(l2_from_both, l2_from_params)

  # every booster must have trained for exactly 'expected_rounds' iterations
  expect_identical(booster_from_params$current_iter(), booster_from_arg$current_iter())
  expect_identical(booster_from_params$current_iter(), booster_from_both$current_iter())
  expect_identical(booster_from_params$current_iter(), expected_rounds)
})
test_that("lightgbm() performs evaluation on validation sets if they are provided", {
set.seed(708L)
dvalid1 <- lgb.Dataset(
......@@ -467,6 +529,76 @@ test_that("lgb.train() rejects negative or 0 value passed to nrounds", {
}
})
test_that("lgb.train() accepts nrounds as either a top-level argument or parameter", {
  # 'nrounds' should work as a top-level argument and as an alias of
  # 'num_iterations' inside 'params'; when both are given, the value in
  # 'params' should take precedence.
  #
  # NOTE: unlike lightgbm(), lgb.train() has no 'save_name' argument, so no
  # save_name entry belongs in 'params' here (placing one there only injects
  # an unknown parameter into the booster config).
  nrounds <- 15L

  set.seed(708L)
  top_level_bst <- lgb.train(
    data = lgb.Dataset(
      train$data
      , label = train$label
    )
    , nrounds = nrounds
    , params = list(
      objective = "regression"
      , metric = "l2"
      , num_leaves = 5L
    )
  )

  set.seed(708L)
  param_bst <- lgb.train(
    data = lgb.Dataset(
      train$data
      , label = train$label
    )
    , params = list(
      objective = "regression"
      , metric = "l2"
      , num_leaves = 5L
      , nrounds = nrounds
    )
  )

  set.seed(708L)
  both_customized <- lgb.train(
    data = lgb.Dataset(
      train$data
      , label = train$label
    )
    , nrounds = 20L
    , params = list(
      objective = "regression"
      , metric = "l2"
      , num_leaves = 5L
      , nrounds = nrounds
    )
  )

  top_level_l2 <- top_level_bst$eval_train()[[1L]][["value"]]
  params_l2 <- param_bst$eval_train()[[1L]][["value"]]
  both_l2 <- both_customized$eval_train()[[1L]][["value"]]

  # check type just to be sure the subsetting didn't return a NULL
  expect_true(is.numeric(top_level_l2))
  expect_true(is.numeric(params_l2))
  expect_true(is.numeric(both_l2))

  # check that model produces identical performance
  expect_identical(top_level_l2, params_l2)
  expect_identical(both_l2, params_l2)

  # all three boosters trained for exactly 'nrounds' iterations
  expect_identical(param_bst$current_iter(), top_level_bst$current_iter())
  expect_identical(param_bst$current_iter(), both_customized$current_iter())
  expect_identical(param_bst$current_iter(), nrounds)
})
test_that("lgb.train() throws an informative error if 'data' is not an lgb.Dataset", {
bad_values <- list(
4L
......
......@@ -153,7 +153,7 @@ Core Parameters
- **Note**: can be used only in CLI version
- ``num_iterations`` :raw-html:`<a id="num_iterations" title="Permalink to this parameter" href="#num_iterations">&#x1F517;&#xFE0E;</a>`, default = ``100``, type = int, aliases: ``num_iteration``, ``n_iter``, ``num_tree``, ``num_trees``, ``num_round``, ``num_rounds``, ``num_boost_round``, ``n_estimators``, ``max_iter``, constraints: ``num_iterations >= 0``
- ``num_iterations`` :raw-html:`<a id="num_iterations" title="Permalink to this parameter" href="#num_iterations">&#x1F517;&#xFE0E;</a>`, default = ``100``, type = int, aliases: ``num_iteration``, ``n_iter``, ``num_tree``, ``num_trees``, ``num_round``, ``num_rounds``, ``nrounds``, ``num_boost_round``, ``n_estimators``, ``max_iter``, constraints: ``num_iterations >= 0``
- number of boosting iterations
......
......@@ -161,7 +161,7 @@ struct Config {
// desc = **Note**: can be used only in CLI version
std::vector<std::string> valid;
// alias = num_iteration, n_iter, num_tree, num_trees, num_round, num_rounds, num_boost_round, n_estimators, max_iter
// alias = num_iteration, n_iter, num_tree, num_trees, num_round, num_rounds, nrounds, num_boost_round, n_estimators, max_iter
// check = >=0
// desc = number of boosting iterations
// desc = **Note**: internally, LightGBM constructs ``num_class * num_iterations`` trees for multi-class classification problems
......
......@@ -386,6 +386,7 @@ class _ConfigAliases:
"num_trees",
"num_round",
"num_rounds",
"nrounds",
"num_boost_round",
"n_estimators",
"max_iter"},
......
......@@ -33,6 +33,7 @@ const std::unordered_map<std::string, std::string>& Config::alias_table() {
{"num_trees", "num_iterations"},
{"num_round", "num_iterations"},
{"num_rounds", "num_iterations"},
{"nrounds", "num_iterations"},
{"num_boost_round", "num_iterations"},
{"n_estimators", "num_iterations"},
{"max_iter", "num_iterations"},
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.