Unverified Commit 7b10baff authored by James Lamb, committed by GitHub
Browse files

[R-package] reduce verbosity in tests using lgb.train() (#4896)

* [R-package] reduce verbosity in tests using lgb.train()

* Update R-package/tests/testthat/test_lgb.Booster.R
parent 8a34b1af
# Verbosity level forwarded to lgb.train() / lightgbm() calls in these tests.
# Defaults to -1 (silent) so test logs stay quiet; override by setting the
# LIGHTGBM_TEST_VERBOSITY environment variable before running the suite.
VERBOSITY <- as.integer(Sys.getenv("LIGHTGBM_TEST_VERBOSITY", unset = "-1"))
# testthat context label for this test file (context() is soft-deprecated in
# testthat 3e but kept here to match the package's existing test layout)
context("lightgbm()")
# TRUE when running on Windows; presumably used to gate platform-specific
# expectations later in this file — confirm against the full test file
ON_WINDOWS <- .Platform$OS.type == "windows"
......@@ -337,6 +341,7 @@ test_that("training continuation works", {
, metric = "binary_logloss"
, num_leaves = 5L
, learning_rate = 1.0
, verbose = VERBOSITY
)
# train for 10 consecutive iterations
......@@ -538,6 +543,7 @@ test_that("lgb.train() works as expected with multiple eval metrics", {
objective = "binary"
, metric = metrics
, learning_rate = 1.0
, verbose = VERBOSITY
)
, valids = list(
"train" = lgb.Dataset(
......@@ -557,7 +563,11 @@ test_that("lgb.train() works as expected with multiple eval metrics", {
test_that("lgb.train() rejects negative or 0 value passed to nrounds", {
dtrain <- lgb.Dataset(train$data, label = train$label)
params <- list(objective = "regression", metric = "l2,l1")
params <- list(
objective = "regression"
, metric = "l2,l1"
, verbose = VERBOSITY
)
for (nround_value in c(-10L, 0L)) {
expect_error({
bst <- lgb.train(
......@@ -585,6 +595,7 @@ test_that("lgb.train() accepts nrounds as either a top-level argument or paramet
, metric = "l2"
, num_leaves = 5L
, save_name = tempfile(fileext = ".model")
, verbose = VERBOSITY
)
)
......@@ -600,6 +611,7 @@ test_that("lgb.train() accepts nrounds as either a top-level argument or paramet
, num_leaves = 5L
, nrounds = nrounds
, save_name = tempfile(fileext = ".model")
, verbose = VERBOSITY
)
)
......@@ -616,6 +628,7 @@ test_that("lgb.train() accepts nrounds as either a top-level argument or paramet
, num_leaves = 5L
, nrounds = nrounds
, save_name = tempfile(fileext = ".model")
, verbose = VERBOSITY
)
)
......@@ -651,7 +664,11 @@ test_that("lgb.train() throws an informative error if 'data' is not an lgb.Datas
for (val in bad_values) {
expect_error({
bst <- lgb.train(
params = list(objective = "regression", metric = "l2,l1")
params = list(
objective = "regression"
, metric = "l2,l1"
, verbose = VERBOSITY
)
, data = val
, 10L
)
......@@ -666,7 +683,11 @@ test_that("lgb.train() throws an informative error if 'valids' is not a list of
)
expect_error({
bst <- lgb.train(
params = list(objective = "regression", metric = "l2,l1")
params = list(
objective = "regression"
, metric = "l2,l1"
, verbose = VERBOSITY
)
, data = lgb.Dataset(train$data, label = train$label)
, 10L
, valids = valids
......@@ -681,7 +702,11 @@ test_that("lgb.train() errors if 'valids' is a list of lgb.Dataset objects but s
)
expect_error({
bst <- lgb.train(
params = list(objective = "regression", metric = "l2,l1")
params = list(
objective = "regression"
, metric = "l2,l1"
, verbose = VERBOSITY
)
, data = lgb.Dataset(train$data, label = train$label)
, 10L
, valids = valids
......@@ -696,7 +721,11 @@ test_that("lgb.train() throws an informative error if 'valids' contains lgb.Data
)
expect_error({
bst <- lgb.train(
params = list(objective = "regression", metric = "l2,l1")
params = list(
objective = "regression"
, metric = "l2,l1"
, verbose = VERBOSITY
)
, data = lgb.Dataset(train$data, label = train$label)
, 10L
, valids = valids
......@@ -715,6 +744,7 @@ test_that("lgb.train() works with force_col_wise and force_row_wise", {
objective = "binary"
, metric = "binary_error"
, force_col_wise = TRUE
, verbose = VERBOSITY
)
bst_col_wise <- lgb.train(
params = params
......@@ -726,6 +756,7 @@ test_that("lgb.train() works with force_col_wise and force_row_wise", {
objective = "binary"
, metric = "binary_error"
, force_row_wise = TRUE
, verbose = VERBOSITY
)
bst_row_wise <- lgb.train(
params = params
......@@ -764,6 +795,7 @@ test_that("lgb.train() works as expected with sparse features", {
objective = "binary"
, min_data = 1L
, min_data_in_bin = 1L
, verbose = VERBOSITY
)
, data = dtrain
, nrounds = nrounds
......@@ -804,6 +836,7 @@ test_that("lgb.train() works with early stopping for classification", {
params = list(
objective = "binary"
, metric = "binary_error"
, verbose = VERBOSITY
)
, data = dtrain
, nrounds = nrounds
......@@ -827,6 +860,7 @@ test_that("lgb.train() works with early stopping for classification", {
objective = "binary"
, metric = "binary_error"
, early_stopping_rounds = early_stopping_rounds
, verbose = VERBOSITY
)
, data = dtrain
, nrounds = nrounds
......@@ -875,6 +909,7 @@ test_that("lgb.train() treats early_stopping_rounds<=0 as disabling early stoppi
params = list(
objective = "binary"
, metric = "binary_error"
, verbose = VERBOSITY
)
, data = dtrain
, nrounds = nrounds
......@@ -898,6 +933,7 @@ test_that("lgb.train() treats early_stopping_rounds<=0 as disabling early stoppi
objective = "binary"
, metric = "binary_error"
, n_iter_no_change = value
, verbose = VERBOSITY
)
, data = dtrain
, nrounds = nrounds
......@@ -937,6 +973,7 @@ test_that("lgb.train() works with early stopping for classification with a metri
, metric = "auc"
, max_depth = 3L
, early_stopping_rounds = early_stopping_rounds
, verbose = VERBOSITY
)
, data = dtrain
, nrounds = nrounds
......@@ -950,6 +987,7 @@ test_that("lgb.train() works with early stopping for classification with a metri
, metric = "binary_error"
, max_depth = 3L
, early_stopping_rounds = early_stopping_rounds
, verbose = VERBOSITY
)
, data = dtrain
, nrounds = nrounds
......@@ -1008,6 +1046,7 @@ test_that("lgb.train() works with early stopping for regression", {
params = list(
objective = "regression"
, metric = "rmse"
, verbose = VERBOSITY
)
, data = dtrain
, nrounds = nrounds
......@@ -1031,6 +1070,7 @@ test_that("lgb.train() works with early stopping for regression", {
objective = "regression"
, metric = "rmse"
, early_stopping_rounds = early_stopping_rounds
, verbose = VERBOSITY
)
, data = dtrain
, nrounds = nrounds
......@@ -1065,6 +1105,7 @@ test_that("lgb.train() does not stop early if early_stopping_rounds is not given
params = list(
objective = "regression"
, metric = "None"
, verbose = VERBOSITY
)
, data = DTRAIN_RANDOM_REGRESSION
, nrounds = nrounds
......@@ -1108,12 +1149,14 @@ test_that("If first_metric_only is not given or is FALSE, lgb.train() decides to
objective = "regression"
, metric = "None"
, early_stopping_rounds = early_stopping_rounds
, verbose = VERBOSITY
)
, list(
objective = "regression"
, metric = "None"
, early_stopping_rounds = early_stopping_rounds
, first_metric_only = FALSE
, verbose = VERBOSITY
)
)
......@@ -1176,6 +1219,7 @@ test_that("If first_metric_only is TRUE, lgb.train() decides to stop early based
, metric = "None"
, early_stopping_rounds = early_stopping_rounds
, first_metric_only = TRUE
, verbose = VERBOSITY
)
, data = DTRAIN_RANDOM_REGRESSION
, nrounds = nrounds
......@@ -1221,6 +1265,7 @@ test_that("lgb.train() works when a mixture of functions and strings are passed
params = list(
objective = "regression"
, metric = "None"
, verbose = VERBOSITY
)
, data = DTRAIN_RANDOM_REGRESSION
, nrounds = nrounds
......@@ -1276,6 +1321,7 @@ test_that("lgb.train() works when a list of strings or a character vector is pas
params = list(
objective = "binary"
, metric = "None"
, verbose = VERBOSITY
)
, data = DTRAIN_RANDOM_CLASSIFICATION
, nrounds = nrounds
......@@ -1312,6 +1358,7 @@ test_that("lgb.train() works when you specify both 'metric' and 'eval' with stri
params = list(
objective = "binary"
, metric = "binary_error"
, verbose = VERBOSITY
)
, data = DTRAIN_RANDOM_CLASSIFICATION
, nrounds = nrounds
......@@ -1343,6 +1390,7 @@ test_that("lgb.train() works when you give a function for eval", {
params = list(
objective = "binary"
, metric = "None"
, verbose = VERBOSITY
)
, data = DTRAIN_RANDOM_CLASSIFICATION
, nrounds = nrounds
......@@ -1391,6 +1439,7 @@ test_that("lgb.train() works with early stopping for regression with a metric th
)
, min_data_in_bin = 5L
, early_stopping_rounds = early_stopping_rounds
, verbose = VERBOSITY
)
, data = dtrain
, nrounds = nrounds
......@@ -1430,6 +1479,7 @@ test_that("lgb.train() supports non-ASCII feature names", {
, obj = "regression"
, params = list(
metric = "rmse"
, verbose = VERBOSITY
)
, colnames = feature_names
)
......@@ -1512,6 +1562,7 @@ test_that("when early stopping is not activated, best_iter and best_score come f
, metric = "rmse"
, learning_rate = 1.5
, num_leaves = 5L
, verbose = VERBOSITY
)
# example 1: two valids, neither are the training data
......@@ -1671,6 +1722,7 @@ test_that("lightgbm.train() gives the correct best_score and best_iter for a met
, metric = "auc"
, learning_rate = 1.5
, num_leaves = 5L
, verbose = VERBOSITY
)
)
# note that "something-random-we-would-not-hardcode" was recognized as the training
......@@ -1915,7 +1967,7 @@ test_that("lgb.train() fit on linearly-relatead data improves when using linear
params <- list(
objective = "regression"
, verbose = -1L
, verbose = VERBOSITY
, metric = "mse"
, seed = 0L
, num_leaves = 2L
......@@ -1949,7 +2001,7 @@ test_that("lgb.train() w/ linear learner fails already-constructed dataset with
set.seed(708L)
params <- list(
objective = "regression"
, verbose = -1L
, verbose = VERBOSITY
, metric = "mse"
, seed = 0L
, num_leaves = 2L
......@@ -1986,7 +2038,7 @@ test_that("lgb.train() works with linear learners even if Dataset has missing va
params <- list(
objective = "regression"
, verbose = -1L
, verbose = VERBOSITY
, metric = "mse"
, seed = 0L
, num_leaves = 2L
......@@ -2032,7 +2084,7 @@ test_that("lgb.train() works with linear learners, bagging, and a Dataset that h
params <- list(
objective = "regression"
, verbose = -1L
, verbose = VERBOSITY
, metric = "mse"
, seed = 0L
, num_leaves = 2L
......
# Verbosity passed through to training calls in this file's tests.
# -1 (silent) unless the LIGHTGBM_TEST_VERBOSITY environment variable is set.
VERBOSITY <- as.integer(Sys.getenv("LIGHTGBM_TEST_VERBOSITY", unset = "-1"))
# testthat context label for the custom-objective tests
context("Test models with custom objective")
# load the agaricus (mushroom) training dataset shipped with the lightgbm package
data(agaricus.train, package = "lightgbm")
......@@ -36,6 +40,7 @@ param <- list(
, learning_rate = 1.0
, objective = logregobj
, metric = "auc"
, verbose = VERBOSITY
)
num_round <- 10L
......@@ -50,6 +55,7 @@ test_that("using a custom objective, custom eval, and no other metrics works", {
params = list(
num_leaves = 8L
, learning_rate = 1.0
, verbose = VERBOSITY
)
, data = dtrain
, nrounds = 4L
......
# Test verbosity knob: silent (-1) by default, overridable through the
# LIGHTGBM_TEST_VERBOSITY environment variable.
VERBOSITY <- as.integer(Sys.getenv("LIGHTGBM_TEST_VERBOSITY", unset = "-1"))
# testthat context label for the lgb.Dataset tests
context("testing lgb.Dataset functionality")
# load the agaricus (mushroom) training dataset shipped with the lightgbm package
data(agaricus.train, package = "lightgbm")
......@@ -368,6 +372,7 @@ test_that("lgb.Dataset: should be able to run lgb.train() immediately after usin
, metric = "binary_logloss"
, num_leaves = 5L
, learning_rate = 1.0
, verbose = VERBOSITY
)
# should be able to train right away
......
# Verbosity used by training calls below; defaults to -1 (silent) and can be
# raised via the LIGHTGBM_TEST_VERBOSITY environment variable for debugging.
VERBOSITY <- as.integer(Sys.getenv("LIGHTGBM_TEST_VERBOSITY", unset = "-1"))
# testthat context label for the learning-to-rank tests
context("Learning to rank")
# numerical tolerance to use when checking metric values
......@@ -25,6 +29,7 @@ test_that("learning-to-rank with lgb.train() works as expected", {
, ndcg_at = ndcg_at
, lambdarank_truncation_level = 3L
, learning_rate = 0.001
, verbose = VERBOSITY
)
model <- lgb.train(
params = params
......
# Shared verbosity setting for the Booster tests: -1 (silent) unless the
# LIGHTGBM_TEST_VERBOSITY environment variable says otherwise.
VERBOSITY <- as.integer(Sys.getenv("LIGHTGBM_TEST_VERBOSITY", unset = "-1"))
# testthat context label for the lgb.Booster tests
context("Booster")
# TRUE when running on Windows; presumably used to gate platform-specific
# expectations later in this file — confirm against the full test file
ON_WINDOWS <- .Platform$OS.type == "windows"
......@@ -12,7 +16,7 @@ test_that("Booster$finalize() should not fail", {
, params = list(
objective = "regression"
)
, verbose = -1L
, verbose = VERBOSITY
, nrounds = 3L
)
expect_true(lgb.is.Booster(bst))
......@@ -65,6 +69,7 @@ test_that("lgb.get.eval.result() should throw an informative error for incorrect
, metric = "l2"
, min_data = 1L
, learning_rate = 1.0
, verbose = VERBOSITY
)
, data = dtrain
, nrounds = 5L
......@@ -98,6 +103,7 @@ test_that("lgb.get.eval.result() should throw an informative error for incorrect
, metric = "l2"
, min_data = 1L
, learning_rate = 1.0
, verbose = VERBOSITY
)
, data = dtrain
, nrounds = 5L
......@@ -133,6 +139,7 @@ test_that("lgb.load() gives the expected error messages given different incorrec
objective = "binary"
, num_leaves = 4L
, learning_rate = 1.0
, verbose = VERBOSITY
)
, nrounds = 2L
, save_name = tempfile(fileext = ".model")
......@@ -179,6 +186,7 @@ test_that("Loading a Booster from a text file works", {
num_leaves = 4L
, learning_rate = 1.0
, objective = "binary"
, verbose = VERBOSITY
)
, nrounds = 2L
, save_name = tempfile(fileext = ".model")
......@@ -221,6 +229,7 @@ test_that("boosters with linear models at leaves can be written to text file and
data = dtrain
, nrounds = 10L
, params = params
, verbose = VERBOSITY
)
expect_true(lgb.is.Booster(bst))
......@@ -254,6 +263,7 @@ test_that("Loading a Booster from a string works", {
num_leaves = 4L
, learning_rate = 1.0
, objective = "binary"
, verbose = VERBOSITY
)
, nrounds = 2L
, save_name = tempfile(fileext = ".model")
......@@ -289,7 +299,7 @@ test_that("Saving a large model to string should work", {
)
, nrounds = 500L
, save_name = tempfile(fileext = ".model")
, verbose = -1L
, verbose = VERBOSITY
)
pred <- predict(bst, train$data)
......@@ -333,7 +343,7 @@ test_that("Saving a large model to JSON should work", {
)
, nrounds = 200L
, save_name = tempfile(fileext = ".model")
, verbose = -1L
, verbose = VERBOSITY
)
model_json <- bst$dump_model()
......@@ -360,6 +370,7 @@ test_that("If a string and a file are both passed to lgb.load() the file is used
num_leaves = 4L
, learning_rate = 1.0
, objective = "binary"
, verbose = VERBOSITY
)
, nrounds = 2L
, save_name = tempfile(fileext = ".model")
......@@ -396,6 +407,7 @@ test_that("Creating a Booster from a Dataset should work", {
bst <- Booster$new(
params = list(
objective = "binary"
, verbose = VERBOSITY
),
train_set = dtrain
)
......@@ -416,6 +428,7 @@ test_that("Creating a Booster from a Dataset with an existing predictor should w
num_leaves = 4L
, learning_rate = 1.0
, objective = "binary"
, verbose = VERBOSITY
)
, nrounds = nrounds
, save_name = tempfile(fileext = ".model")
......@@ -428,6 +441,9 @@ test_that("Creating a Booster from a Dataset with an existing predictor should w
)
bst_from_ds <- Booster$new(
train_set = dtest
, params = list(
verbose = VERBOSITY
)
)
expect_true(lgb.is.Booster(bst))
expect_equal(bst$current_iter(), nrounds)
......@@ -449,6 +465,7 @@ test_that("Booster$eval() should work on a Dataset stored in a binary file", {
objective = "regression"
, metric = "l2"
, num_leaves = 4L
, verbose = VERBOSITY
)
, data = dtrain
, nrounds = 2L
......@@ -505,6 +522,7 @@ test_that("Booster$rollback_one_iter() should work as expected", {
num_leaves = 4L
, learning_rate = 1.0
, objective = "binary"
, verbose = VERBOSITY
)
, nrounds = nrounds
, save_name = tempfile(fileext = ".model")
......@@ -539,6 +557,7 @@ test_that("Booster$update() passing a train_set works as expected", {
num_leaves = 4L
, learning_rate = 1.0
, objective = "binary"
, verbose = VERBOSITY
)
, nrounds = nrounds
, save_name = tempfile(fileext = ".model")
......@@ -562,6 +581,7 @@ test_that("Booster$update() passing a train_set works as expected", {
num_leaves = 4L
, learning_rate = 1.0
, objective = "binary"
, verbose = VERBOSITY
)
, nrounds = nrounds + 1L
, save_name = tempfile(fileext = ".model")
......@@ -587,6 +607,7 @@ test_that("Booster$update() throws an informative error if you provide a non-Dat
num_leaves = 4L
, learning_rate = 1.0
, objective = "binary"
, verbose = VERBOSITY
)
, nrounds = nrounds
, save_name = tempfile(fileext = ".model")
......@@ -614,6 +635,7 @@ test_that("Booster should store parameters and Booster$reset_parameter() should
, metric = c("multi_logloss", "multi_error")
, boosting = "gbdt"
, num_class = 5L
, verbose = VERBOSITY
)
bst <- Booster$new(
params = params
......@@ -640,6 +662,7 @@ test_that("Booster$params should include dataset params, before and after Booste
objective = "binary"
, max_depth = 4L
, bagging_fraction = 0.8
, verbose = VERBOSITY
)
bst <- Booster$new(
params = params
......@@ -651,6 +674,7 @@ test_that("Booster$params should include dataset params, before and after Booste
objective = "binary"
, max_depth = 4L
, bagging_fraction = 0.8
, verbose = VERBOSITY
, max_bin = 17L
)
)
......@@ -661,6 +685,7 @@ test_that("Booster$params should include dataset params, before and after Booste
objective = "binary"
, max_depth = 4L
, bagging_fraction = 0.9
, verbose = VERBOSITY
, max_bin = 17L
)
expect_identical(ret_bst$params, expected_params)
......@@ -680,6 +705,7 @@ test_that("Saving a model with different feature importance types works", {
num_leaves = 4L
, learning_rate = 1.0
, objective = "binary"
, verbose = VERBOSITY
)
, nrounds = 2L
, save_name = tempfile(fileext = ".model")
......@@ -735,6 +761,7 @@ test_that("Saving a model with unknown importance type fails", {
num_leaves = 4L
, learning_rate = 1.0
, objective = "binary"
, verbose = VERBOSITY
)
, nrounds = 2L
, save_name = tempfile(fileext = ".model")
......@@ -770,7 +797,7 @@ test_that("all parameters are stored correctly with save_model_to_string()", {
)
, data = dtrain
, nrounds = nrounds
, verbose = 0L
, verbose = VERBOSITY
)
model_str <- bst$save_model_to_string()
......@@ -787,7 +814,7 @@ test_that("all parameters are stored correctly with save_model_to_string()", {
expect_equal(sum(params_in_file == "[objective: regression]"), 1L)
expect_equal(sum(grepl(pattern = "^\\[verbosity\\:", x = params_in_file)), 1L)
expect_equal(sum(params_in_file == "[verbosity: 0]"), 1L)
expect_equal(sum(params_in_file == sprintf("[verbosity: %i]", VERBOSITY)), 1L)
# early stopping should be off by default
expect_equal(sum(grepl(pattern = "^\\[early_stopping_round\\:", x = params_in_file)), 1L)
......@@ -833,7 +860,7 @@ test_that("early_stopping, num_iterations are stored correctly in model string e
, valids = list(
"random_valid" = dvalid
)
, verbose = 0L
, verbose = VERBOSITY
)
model_str <- bst$save_model_to_string()
......@@ -864,7 +891,7 @@ test_that("Booster: method calls Booster with a null handle should raise an info
, num_leaves = 8L
)
, data = dtrain
, verbose = -1L
, verbose = VERBOSITY
, nrounds = 5L
, valids = list(
train = dtrain
......@@ -937,7 +964,12 @@ test_that("Booster$new() using a Dataset with a null handle should raise an info
rm(dtrain)
dtrain <- readRDS(tmp_file)
expect_error({
bst <- Booster$new(train_set = dtrain)
bst <- Booster$new(
train_set = dtrain
, params = list(
verbose = VERBOSITY
)
)
}, regexp = "lgb.Booster: cannot create Booster handle")
})
......@@ -969,6 +1001,7 @@ test_that("lgb.cv() correctly handles passing through params to the model file",
, n_iter = n_iter
, early_stopping_round = early_stopping_round
, n_iter_no_change = n_iter_no_change
, verbose = VERBOSITY
)
cv_bst <- lgb.cv(
......@@ -977,7 +1010,7 @@ test_that("lgb.cv() correctly handles passing through params to the model file",
, nrounds = nrounds_kwarg
, early_stopping_rounds = early_stopping_round_kwarg
, nfold = 3L
, verbose = 0L
, verbose = VERBOSITY
)
for (bst in cv_bst$boosters) {
......@@ -1014,6 +1047,7 @@ test_that("params (including dataset params) should be stored in .rds file for B
objective = "binary"
, max_depth = 4L
, bagging_fraction = 0.8
, verbose = VERBOSITY
)
bst <- Booster$new(
params = params
......@@ -1029,6 +1063,7 @@ test_that("params (including dataset params) should be stored in .rds file for B
objective = "binary"
, max_depth = 4L
, bagging_fraction = 0.8
, verbose = VERBOSITY
, max_bin = 17L
)
)
......@@ -1049,6 +1084,7 @@ test_that("params (including dataset params) should be stored in .rds file for B
objective = "binary"
, max_depth = 4L
, bagging_fraction = 0.8
, verbose = VERBOSITY
)
bst <- Booster$new(
params = params
......@@ -1064,6 +1100,7 @@ test_that("params (including dataset params) should be stored in .rds file for B
objective = "binary"
, max_depth = 4L
, bagging_fraction = 0.8
, verbose = VERBOSITY
, max_bin = 17L
)
)
......@@ -1071,7 +1108,15 @@ test_that("params (including dataset params) should be stored in .rds file for B
test_that("Handle is automatically restored when calling predict", {
data(agaricus.train, package = "lightgbm")
bst <- lightgbm(agaricus.train$data, agaricus.train$label, nrounds = 5L, obj = "binary")
bst <- lightgbm(
agaricus.train$data
, agaricus.train$label
, nrounds = 5L
, obj = "binary"
, params = list(
verbose = VERBOSITY
)
)
bst_file <- tempfile(fileext = ".rds")
saveRDS(bst, file = bst_file)
......@@ -1092,7 +1137,7 @@ test_that("boosters with linear models at leaves work with saveRDS.lgb.Booster a
params <- list(
objective = "regression"
, verbose = -1L
, verbose = VERBOSITY
, metric = "mse"
, seed = 0L
, num_leaves = 2L
......@@ -1129,7 +1174,7 @@ test_that("boosters with linear models at leaves can be written to RDS and re-lo
params <- list(
objective = "regression"
, verbose = -1L
, verbose = VERBOSITY
, metric = "mse"
, seed = 0L
, num_leaves = 2L
......@@ -1190,7 +1235,7 @@ test_that("Booster's print, show, and summary work correctly", {
, data = lgb.Dataset(
as.matrix(mtcars[, -1L])
, label = mtcars$mpg)
, verbose = 0L
, verbose = VERBOSITY
, nrounds = 5L
)
.check_methods_work(model)
......@@ -1202,7 +1247,7 @@ test_that("Booster's print, show, and summary work correctly", {
as.matrix(iris[, -5L])
, label = as.numeric(factor(iris$Species)) - 1.0
)
, verbose = 0L
, verbose = VERBOSITY
, nrounds = 5L
)
.check_methods_work(model)
......@@ -1235,7 +1280,7 @@ test_that("Booster's print, show, and summary work correctly", {
)
, obj = .logregobj
, eval = .evalerror
, verbose = 0L
, verbose = VERBOSITY
, nrounds = 5L
)
......@@ -1249,7 +1294,7 @@ test_that("LGBM_BoosterGetNumFeature_R returns correct outputs", {
, data = lgb.Dataset(
as.matrix(mtcars[, -1L])
, label = mtcars$mpg)
, verbose = 0L
, verbose = VERBOSITY
, nrounds = 5L
)
ncols <- .Call(LGBM_BoosterGetNumFeature_R, model$.__enclos_env__$private$handle)
......@@ -1262,7 +1307,7 @@ test_that("LGBM_BoosterGetNumFeature_R returns correct outputs", {
as.matrix(iris[, -5L])
, label = as.numeric(factor(iris$Species)) - 1.0
)
, verbose = 0L
, verbose = VERBOSITY
, nrounds = 5L
)
ncols <- .Call(LGBM_BoosterGetNumFeature_R, model$.__enclos_env__$private$handle)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment