Unverified Commit 44928d3a authored by James Lamb's avatar James Lamb Committed by GitHub
Browse files

[R-package] consolidate testing constants in helpers file (#5992)

parent 8967debe
# ref for this file:
#
# * https://r-pkgs.org/testing-design.html#testthat-helper-files
# * https://r-pkgs.org/testing-design.html#testthat-setup-files

# LightGBM-internal fix to comply with CRAN policy of only using up to 2 threads in tests and examples.
...@@ -10,3 +11,21 @@ ...@@ -10,3 +11,21 @@
# the check farm is a shared resource and will typically be running many checks simultaneously.
#
# cap on threads used in tests (CRAN policy allows at most 2)
.LGB_MAX_THREADS <- 2L

# by default, how much should results in tests be allowed to differ
# from hard-coded expected numbers?
.LGB_NUMERIC_TOLERANCE <- 1e-6

# are the tests running on Windows?
.LGB_ON_WINDOWS <- .Platform$OS.type == "windows"
# 32-bit builds are detected by a pointer size other than 8 bytes
.LGB_ON_32_BIT_WINDOWS <- .LGB_ON_WINDOWS && .Machine$sizeof.pointer != 8L

# are the tests running in a UTF-8 locale?
.LGB_UTF8_LOCALE <- all(endsWith(
    Sys.getlocale(category = "LC_CTYPE")
    , "UTF-8"
))

# control how loud LightGBM's logger is in tests
# (read from LIGHTGBM_TEST_VERBOSITY; "-1" when that variable is unset)
.LGB_VERBOSITY <- as.integer(
    Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1")
)
library(Matrix)

# logger verbosity for tests, read from the LIGHTGBM_TEST_VERBOSITY
# environment variable ("-1" when unset)
VERBOSITY <- as.integer(
    Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1")
)

# how much results in tests may differ from hard-coded expected numbers
TOLERANCE <- 1e-6
test_that("Predictor$finalize() should not fail", { test_that("Predictor$finalize() should not fail", {
X <- as.matrix(as.integer(iris[, "Species"]), ncol = 1L) X <- as.matrix(as.integer(iris[, "Species"]), ncol = 1L)
y <- iris[["Sepal.Length"]] y <- iris[["Sepal.Length"]]
...@@ -16,7 +10,7 @@ test_that("Predictor$finalize() should not fail", { ...@@ -16,7 +10,7 @@ test_that("Predictor$finalize() should not fail", {
objective = "regression" objective = "regression"
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, nrounds = 3L , nrounds = 3L
) )
model_file <- tempfile(fileext = ".model") model_file <- tempfile(fileext = ".model")
...@@ -45,7 +39,7 @@ test_that("predictions do not fail for integer input", { ...@@ -45,7 +39,7 @@ test_that("predictions do not fail for integer input", {
objective = "regression" objective = "regression"
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, nrounds = 3L , nrounds = 3L
) )
X_double <- X[c(1L, 51L, 101L), , drop = FALSE] X_double <- X[c(1L, 51L, 101L), , drop = FALSE]
...@@ -78,7 +72,7 @@ test_that("start_iteration works correctly", { ...@@ -78,7 +72,7 @@ test_that("start_iteration works correctly", {
num_leaves = 4L num_leaves = 4L
, learning_rate = 0.6 , learning_rate = 0.6
, objective = "binary" , objective = "binary"
, verbosity = VERBOSITY , verbosity = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, nrounds = 50L , nrounds = 50L
...@@ -128,7 +122,7 @@ test_that("Feature contributions from sparse inputs produce sparse outputs", { ...@@ -128,7 +122,7 @@ test_that("Feature contributions from sparse inputs produce sparse outputs", {
data = dtrain data = dtrain
, obj = "regression" , obj = "regression"
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, params = list(min_data_in_leaf = 5L, num_threads = .LGB_MAX_THREADS) , params = list(min_data_in_leaf = 5L, num_threads = .LGB_MAX_THREADS)
) )
...@@ -159,7 +153,7 @@ test_that("Sparse feature contribution predictions do not take inputs with wrong ...@@ -159,7 +153,7 @@ test_that("Sparse feature contribution predictions do not take inputs with wrong
data = dtrain data = dtrain
, obj = "regression" , obj = "regression"
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, params = list(min_data_in_leaf = 5L, num_threads = .LGB_MAX_THREADS) , params = list(min_data_in_leaf = 5L, num_threads = .LGB_MAX_THREADS)
) )
...@@ -189,7 +183,7 @@ test_that("Feature contribution predictions do not take non-general CSR or CSC i ...@@ -189,7 +183,7 @@ test_that("Feature contribution predictions do not take non-general CSR or CSC i
data = dtrain data = dtrain
, obj = "regression" , obj = "regression"
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, params = list(min_data_in_leaf = 5L, num_threads = .LGB_MAX_THREADS) , params = list(min_data_in_leaf = 5L, num_threads = .LGB_MAX_THREADS)
) )
...@@ -217,14 +211,14 @@ test_that("predict() params should override keyword argument for raw-score predi ...@@ -217,14 +211,14 @@ test_that("predict() params should override keyword argument for raw-score predi
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, nrounds = 10L , nrounds = 10L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
) )
# check that the predictions from predict.lgb.Booster() really look like raw score predictions # check that the predictions from predict.lgb.Booster() really look like raw score predictions
preds_prob <- predict(bst, X) preds_prob <- predict(bst, X)
preds_raw_s3_keyword <- predict(bst, X, type = "raw") preds_raw_s3_keyword <- predict(bst, X, type = "raw")
preds_prob_from_raw <- 1.0 / (1.0 + exp(-preds_raw_s3_keyword)) preds_prob_from_raw <- 1.0 / (1.0 + exp(-preds_raw_s3_keyword))
expect_equal(preds_prob, preds_prob_from_raw, tolerance = TOLERANCE) expect_equal(preds_prob, preds_prob_from_raw, tolerance = .LGB_NUMERIC_TOLERANCE)
accuracy <- sum(as.integer(preds_prob_from_raw > 0.5) == y) / length(y) accuracy <- sum(as.integer(preds_prob_from_raw > 0.5) == y) / length(y)
expect_equal(accuracy, 1.0) expect_equal(accuracy, 1.0)
...@@ -269,7 +263,7 @@ test_that("predict() params should override keyword argument for leaf-index pred ...@@ -269,7 +263,7 @@ test_that("predict() params should override keyword argument for leaf-index pred
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, nrounds = 10L , nrounds = 10L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
) )
# check that predictions really look like leaf index predictions # check that predictions really look like leaf index predictions
...@@ -323,7 +317,7 @@ test_that("predict() params should override keyword argument for feature contrib ...@@ -323,7 +317,7 @@ test_that("predict() params should override keyword argument for feature contrib
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, nrounds = 10L , nrounds = 10L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
) )
# check that predictions really look like feature contributions # check that predictions really look like feature contributions
...@@ -431,7 +425,7 @@ test_that("predict() keeps row names from data (regression)", { ...@@ -431,7 +425,7 @@ test_that("predict() keeps row names from data (regression)", {
data = dtrain data = dtrain
, obj = "regression" , obj = "regression"
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, params = list(min_data_in_leaf = 1L, num_threads = .LGB_MAX_THREADS) , params = list(min_data_in_leaf = 1L, num_threads = .LGB_MAX_THREADS)
) )
.check_all_row_name_expectations(bst, X) .check_all_row_name_expectations(bst, X)
...@@ -447,7 +441,7 @@ test_that("predict() keeps row names from data (binary classification)", { ...@@ -447,7 +441,7 @@ test_that("predict() keeps row names from data (binary classification)", {
data = dtrain data = dtrain
, obj = "binary" , obj = "binary"
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, params = list(num_threads = .LGB_MAX_THREADS) , params = list(num_threads = .LGB_MAX_THREADS)
) )
.check_all_row_name_expectations(bst, X) .check_all_row_name_expectations(bst, X)
...@@ -464,7 +458,7 @@ test_that("predict() keeps row names from data (multi-class classification)", { ...@@ -464,7 +458,7 @@ test_that("predict() keeps row names from data (multi-class classification)", {
, obj = "multiclass" , obj = "multiclass"
, params = list(num_class = 3L, num_threads = .LGB_MAX_THREADS) , params = list(num_class = 3L, num_threads = .LGB_MAX_THREADS)
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
) )
.check_all_row_name_expectations(bst, X) .check_all_row_name_expectations(bst, X)
}) })
...@@ -485,7 +479,7 @@ test_that("predictions for regression and binary classification are returned as ...@@ -485,7 +479,7 @@ test_that("predictions for regression and binary classification are returned as
data = dtrain data = dtrain
, obj = "regression" , obj = "regression"
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, params = list(min_data_in_leaf = 1L, num_threads = .LGB_MAX_THREADS) , params = list(min_data_in_leaf = 1L, num_threads = .LGB_MAX_THREADS)
) )
pred <- predict(model, X) pred <- predict(model, X)
...@@ -503,7 +497,7 @@ test_that("predictions for regression and binary classification are returned as ...@@ -503,7 +497,7 @@ test_that("predictions for regression and binary classification are returned as
data = dtrain data = dtrain
, obj = "binary" , obj = "binary"
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, params = list(num_threads = .LGB_MAX_THREADS) , params = list(num_threads = .LGB_MAX_THREADS)
) )
pred <- predict(model, X) pred <- predict(model, X)
...@@ -523,7 +517,7 @@ test_that("predictions for multiclass classification are returned as matrix", { ...@@ -523,7 +517,7 @@ test_that("predictions for multiclass classification are returned as matrix", {
data = dtrain data = dtrain
, obj = "multiclass" , obj = "multiclass"
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, params = list(num_class = 3L, num_threads = .LGB_MAX_THREADS) , params = list(num_class = 3L, num_threads = .LGB_MAX_THREADS)
) )
pred <- predict(model, X) pred <- predict(model, X)
...@@ -668,7 +662,7 @@ test_that("predict type='class' returns predicted class for classification objec ...@@ -668,7 +662,7 @@ test_that("predict type='class' returns predicted class for classification objec
data = dtrain data = dtrain
, obj = "binary" , obj = "binary"
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, params = list(num_threads = .LGB_MAX_THREADS) , params = list(num_threads = .LGB_MAX_THREADS)
) )
pred <- predict(bst, X, type = "class") pred <- predict(bst, X, type = "class")
...@@ -682,7 +676,7 @@ test_that("predict type='class' returns predicted class for classification objec ...@@ -682,7 +676,7 @@ test_that("predict type='class' returns predicted class for classification objec
data = dtrain data = dtrain
, obj = "multiclass" , obj = "multiclass"
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, params = list(num_class = 3L, num_threads = .LGB_MAX_THREADS) , params = list(num_class = 3L, num_threads = .LGB_MAX_THREADS)
) )
pred <- predict(model, X, type = "class") pred <- predict(model, X, type = "class")
...@@ -698,7 +692,7 @@ test_that("predict type='class' returns values in the target's range for regress ...@@ -698,7 +692,7 @@ test_that("predict type='class' returns values in the target's range for regress
data = dtrain data = dtrain
, obj = "regression" , obj = "regression"
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, params = list(num_threads = .LGB_MAX_THREADS) , params = list(num_threads = .LGB_MAX_THREADS)
) )
pred <- predict(bst, X, type = "class") pred <- predict(bst, X, type = "class")
......
# Logger verbosity used by the tests below; overridable through the
# LIGHTGBM_TEST_VERBOSITY environment variable ("-1" when unset).
VERBOSITY <- as.integer(Sys.getenv("LIGHTGBM_TEST_VERBOSITY", unset = "-1"))

# Is this test run happening on Windows?
ON_WINDOWS <- identical(.Platform$OS.type, "windows")

# Does every component of the LC_CTYPE locale string end in "UTF-8"?
UTF8_LOCALE <- all(endsWith(Sys.getlocale(category = "LC_CTYPE"), "UTF-8"))
# shared fixtures: the agaricus (mushroom) datasets shipped with lightgbm
data(agaricus.train, package = "lightgbm")
data(agaricus.test, package = "lightgbm")
train <- agaricus.train
test <- agaricus.test

# how much results in tests may differ from hard-coded expected numbers
TOLERANCE <- 1e-6

# fixed seed so training results are reproducible across runs
set.seed(708L)
# [description] Every time this function is called, it adds 0.1 # [description] Every time this function is called, it adds 0.1
...@@ -82,7 +70,7 @@ test_that("train and predict binary classification", { ...@@ -82,7 +70,7 @@ test_that("train and predict binary classification", {
num_leaves = 5L num_leaves = 5L
, objective = "binary" , objective = "binary"
, metric = "binary_error" , metric = "binary_error"
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, nrounds = nrounds , nrounds = nrounds
...@@ -104,7 +92,7 @@ test_that("train and predict binary classification", { ...@@ -104,7 +92,7 @@ test_that("train and predict binary classification", {
expect_equal(length(pred1), 6513L) expect_equal(length(pred1), 6513L)
err_pred1 <- sum((pred1 > 0.5) != train$label) / length(train$label) err_pred1 <- sum((pred1 > 0.5) != train$label) / length(train$label)
err_log <- record_results[1L] err_log <- record_results[1L]
expect_lt(abs(err_pred1 - err_log), TOLERANCE) expect_lt(abs(err_pred1 - err_log), .LGB_NUMERIC_TOLERANCE)
}) })
...@@ -124,7 +112,7 @@ test_that("train and predict softmax", { ...@@ -124,7 +112,7 @@ test_that("train and predict softmax", {
, objective = "multiclass" , objective = "multiclass"
, metric = "multi_error" , metric = "multi_error"
, num_class = 3L , num_class = 3L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, nrounds = 20L , nrounds = 20L
...@@ -155,7 +143,7 @@ test_that("use of multiple eval metrics works", { ...@@ -155,7 +143,7 @@ test_that("use of multiple eval metrics works", {
, learning_rate = 1.0 , learning_rate = 1.0
, objective = "binary" , objective = "binary"
, metric = metrics , metric = metrics
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, nrounds = 10L , nrounds = 10L
...@@ -186,13 +174,13 @@ test_that("lgb.Booster.upper_bound() and lgb.Booster.lower_bound() work as expec ...@@ -186,13 +174,13 @@ test_that("lgb.Booster.upper_bound() and lgb.Booster.lower_bound() work as expec
num_leaves = 5L num_leaves = 5L
, objective = "binary" , objective = "binary"
, metric = "binary_error" , metric = "binary_error"
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, nrounds = nrounds , nrounds = nrounds
) )
expect_true(abs(bst$lower_bound() - -1.590853) < TOLERANCE) expect_true(abs(bst$lower_bound() - -1.590853) < .LGB_NUMERIC_TOLERANCE)
expect_true(abs(bst$upper_bound() - 1.871015) < TOLERANCE) expect_true(abs(bst$upper_bound() - 1.871015) < .LGB_NUMERIC_TOLERANCE)
}) })
test_that("lgb.Booster.upper_bound() and lgb.Booster.lower_bound() work as expected for regression", { test_that("lgb.Booster.upper_bound() and lgb.Booster.lower_bound() work as expected for regression", {
...@@ -205,13 +193,13 @@ test_that("lgb.Booster.upper_bound() and lgb.Booster.lower_bound() work as expec ...@@ -205,13 +193,13 @@ test_that("lgb.Booster.upper_bound() and lgb.Booster.lower_bound() work as expec
num_leaves = 5L num_leaves = 5L
, objective = "regression" , objective = "regression"
, metric = "l2" , metric = "l2"
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, nrounds = nrounds , nrounds = nrounds
) )
expect_true(abs(bst$lower_bound() - 0.1513859) < TOLERANCE) expect_true(abs(bst$lower_bound() - 0.1513859) < .LGB_NUMERIC_TOLERANCE)
expect_true(abs(bst$upper_bound() - 0.9080349) < TOLERANCE) expect_true(abs(bst$upper_bound() - 0.9080349) < .LGB_NUMERIC_TOLERANCE)
}) })
test_that("lightgbm() rejects negative or 0 value passed to nrounds", { test_that("lightgbm() rejects negative or 0 value passed to nrounds", {
...@@ -240,7 +228,7 @@ test_that("lightgbm() accepts nrounds as either a top-level argument or paramete ...@@ -240,7 +228,7 @@ test_that("lightgbm() accepts nrounds as either a top-level argument or paramete
objective = "regression" objective = "regression"
, metric = "l2" , metric = "l2"
, num_leaves = 5L , num_leaves = 5L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
) )
...@@ -254,7 +242,7 @@ test_that("lightgbm() accepts nrounds as either a top-level argument or paramete ...@@ -254,7 +242,7 @@ test_that("lightgbm() accepts nrounds as either a top-level argument or paramete
, metric = "l2" , metric = "l2"
, num_leaves = 5L , num_leaves = 5L
, nrounds = nrounds , nrounds = nrounds
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
) )
...@@ -269,7 +257,7 @@ test_that("lightgbm() accepts nrounds as either a top-level argument or paramete ...@@ -269,7 +257,7 @@ test_that("lightgbm() accepts nrounds as either a top-level argument or paramete
, metric = "l2" , metric = "l2"
, num_leaves = 5L , num_leaves = 5L
, nrounds = nrounds , nrounds = nrounds
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
) )
...@@ -316,7 +304,7 @@ test_that("lightgbm() performs evaluation on validation sets if they are provide ...@@ -316,7 +304,7 @@ test_that("lightgbm() performs evaluation on validation sets if they are provide
"binary_error" "binary_error"
, "auc" , "auc"
) )
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, nrounds = nrounds , nrounds = nrounds
...@@ -341,9 +329,9 @@ test_that("lightgbm() performs evaluation on validation sets if they are provide ...@@ -341,9 +329,9 @@ test_that("lightgbm() performs evaluation on validation sets if they are provide
eval_results <- bst$record_evals[[valid_name]][["binary_error"]] eval_results <- bst$record_evals[[valid_name]][["binary_error"]]
expect_length(eval_results[["eval"]], nrounds) expect_length(eval_results[["eval"]], nrounds)
} }
expect_true(abs(bst$record_evals[["train"]][["binary_error"]][["eval"]][[1L]] - 0.02226317) < TOLERANCE) expect_true(abs(bst$record_evals[["train"]][["binary_error"]][["eval"]][[1L]] - 0.02226317) < .LGB_NUMERIC_TOLERANCE)
expect_true(abs(bst$record_evals[["valid1"]][["binary_error"]][["eval"]][[1L]] - 0.02226317) < TOLERANCE) expect_true(abs(bst$record_evals[["valid1"]][["binary_error"]][["eval"]][[1L]] - 0.02226317) < .LGB_NUMERIC_TOLERANCE)
expect_true(abs(bst$record_evals[["valid2"]][["binary_error"]][["eval"]][[1L]] - 0.02226317) < TOLERANCE) expect_true(abs(bst$record_evals[["valid2"]][["binary_error"]][["eval"]][[1L]] - 0.02226317) < .LGB_NUMERIC_TOLERANCE)
}) })
test_that("training continuation works", { test_that("training continuation works", {
...@@ -359,7 +347,7 @@ test_that("training continuation works", { ...@@ -359,7 +347,7 @@ test_that("training continuation works", {
, metric = "binary_logloss" , metric = "binary_logloss"
, num_leaves = 5L , num_leaves = 5L
, learning_rate = 1.0 , learning_rate = 1.0
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
...@@ -386,7 +374,7 @@ test_that("cv works", { ...@@ -386,7 +374,7 @@ test_that("cv works", {
, metric = "l2,l1" , metric = "l2,l1"
, min_data = 1L , min_data = 1L
, learning_rate = 1.0 , learning_rate = 1.0
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
bst <- lgb.cv( bst <- lgb.cv(
...@@ -407,7 +395,7 @@ test_that("CVBooster$reset_parameter() works as expected", { ...@@ -407,7 +395,7 @@ test_that("CVBooster$reset_parameter() works as expected", {
objective = "regression" objective = "regression"
, min_data = 1L , min_data = 1L
, num_leaves = 7L , num_leaves = 7L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, data = dtrain , data = dtrain
...@@ -487,7 +475,7 @@ test_that("lightgbm.cv() gives the correct best_score and best_iter for a metric ...@@ -487,7 +475,7 @@ test_that("lightgbm.cv() gives the correct best_score and best_iter for a metric
, metric = "auc,binary_error" , metric = "auc,binary_error"
, learning_rate = 1.5 , learning_rate = 1.5
, num_leaves = 5L , num_leaves = 5L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
) )
...@@ -551,7 +539,7 @@ test_that("lgb.cv() respects showsd argument", { ...@@ -551,7 +539,7 @@ test_that("lgb.cv() respects showsd argument", {
objective = "regression" objective = "regression"
, metric = "l2" , metric = "l2"
, min_data = 1L , min_data = 1L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
nrounds <- 5L nrounds <- 5L
...@@ -594,7 +582,7 @@ test_that("lgb.cv() raises an informative error for unrecognized objectives", { ...@@ -594,7 +582,7 @@ test_that("lgb.cv() raises an informative error for unrecognized objectives", {
data = dtrain data = dtrain
, params = list( , params = list(
objective_type = "not_a_real_objective" objective_type = "not_a_real_objective"
, verbosity = VERBOSITY , verbosity = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
) )
...@@ -616,7 +604,7 @@ test_that("lgb.cv() respects parameter aliases for objective", { ...@@ -616,7 +604,7 @@ test_that("lgb.cv() respects parameter aliases for objective", {
num_leaves = 5L num_leaves = 5L
, application = "binary" , application = "binary"
, num_iterations = nrounds , num_iterations = nrounds
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, nfold = nfold , nfold = nfold
...@@ -637,7 +625,7 @@ test_that("lgb.cv() prefers objective in params to keyword argument", { ...@@ -637,7 +625,7 @@ test_that("lgb.cv() prefers objective in params to keyword argument", {
) )
, params = list( , params = list(
application = "regression_l1" application = "regression_l1"
, verbosity = VERBOSITY , verbosity = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, nrounds = 5L , nrounds = 5L
...@@ -673,7 +661,7 @@ test_that("lgb.cv() respects parameter aliases for metric", { ...@@ -673,7 +661,7 @@ test_that("lgb.cv() respects parameter aliases for metric", {
, objective = "binary" , objective = "binary"
, num_iterations = nrounds , num_iterations = nrounds
, metric_types = c("auc", "binary_logloss") , metric_types = c("auc", "binary_logloss")
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, nfold = nfold , nfold = nfold
...@@ -691,7 +679,7 @@ test_that("lgb.cv() respects eval_train_metric argument", { ...@@ -691,7 +679,7 @@ test_that("lgb.cv() respects eval_train_metric argument", {
objective = "regression" objective = "regression"
, metric = "l2" , metric = "l2"
, min_data = 1L , min_data = 1L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
nrounds <- 5L nrounds <- 5L
...@@ -739,7 +727,7 @@ test_that("lgb.train() works as expected with multiple eval metrics", { ...@@ -739,7 +727,7 @@ test_that("lgb.train() works as expected with multiple eval metrics", {
objective = "binary" objective = "binary"
, metric = metrics , metric = metrics
, learning_rate = 1.0 , learning_rate = 1.0
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, valids = list( , valids = list(
...@@ -770,7 +758,7 @@ test_that("lgb.train() raises an informative error for unrecognized objectives", ...@@ -770,7 +758,7 @@ test_that("lgb.train() raises an informative error for unrecognized objectives",
data = dtrain data = dtrain
, params = list( , params = list(
objective_type = "not_a_real_objective" objective_type = "not_a_real_objective"
, verbosity = VERBOSITY , verbosity = .LGB_VERBOSITY
) )
) )
}, type = "message") }, type = "message")
...@@ -790,7 +778,7 @@ test_that("lgb.train() respects parameter aliases for objective", { ...@@ -790,7 +778,7 @@ test_that("lgb.train() respects parameter aliases for objective", {
num_leaves = 5L num_leaves = 5L
, application = "binary" , application = "binary"
, num_iterations = nrounds , num_iterations = nrounds
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, valids = list( , valids = list(
...@@ -812,7 +800,7 @@ test_that("lgb.train() prefers objective in params to keyword argument", { ...@@ -812,7 +800,7 @@ test_that("lgb.train() prefers objective in params to keyword argument", {
) )
, params = list( , params = list(
loss = "regression_l1" loss = "regression_l1"
, verbosity = VERBOSITY , verbosity = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, nrounds = 5L , nrounds = 5L
...@@ -844,7 +832,7 @@ test_that("lgb.train() respects parameter aliases for metric", { ...@@ -844,7 +832,7 @@ test_that("lgb.train() respects parameter aliases for metric", {
, objective = "binary" , objective = "binary"
, num_iterations = nrounds , num_iterations = nrounds
, metric_types = c("auc", "binary_logloss") , metric_types = c("auc", "binary_logloss")
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, valids = list( , valids = list(
...@@ -863,7 +851,7 @@ test_that("lgb.train() rejects negative or 0 value passed to nrounds", { ...@@ -863,7 +851,7 @@ test_that("lgb.train() rejects negative or 0 value passed to nrounds", {
params <- list( params <- list(
objective = "regression" objective = "regression"
, metric = "l2,l1" , metric = "l2,l1"
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
for (nround_value in c(-10L, 0L)) { for (nround_value in c(-10L, 0L)) {
...@@ -893,7 +881,7 @@ test_that("lgb.train() accepts nrounds as either a top-level argument or paramet ...@@ -893,7 +881,7 @@ test_that("lgb.train() accepts nrounds as either a top-level argument or paramet
objective = "regression" objective = "regression"
, metric = "l2" , metric = "l2"
, num_leaves = 5L , num_leaves = 5L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
) )
...@@ -910,7 +898,7 @@ test_that("lgb.train() accepts nrounds as either a top-level argument or paramet ...@@ -910,7 +898,7 @@ test_that("lgb.train() accepts nrounds as either a top-level argument or paramet
, metric = "l2" , metric = "l2"
, num_leaves = 5L , num_leaves = 5L
, nrounds = nrounds , nrounds = nrounds
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
) )
) )
...@@ -927,7 +915,7 @@ test_that("lgb.train() accepts nrounds as either a top-level argument or paramet ...@@ -927,7 +915,7 @@ test_that("lgb.train() accepts nrounds as either a top-level argument or paramet
, metric = "l2" , metric = "l2"
, num_leaves = 5L , num_leaves = 5L
, nrounds = nrounds , nrounds = nrounds
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
) )
...@@ -967,7 +955,7 @@ test_that("lgb.train() throws an informative error if 'data' is not an lgb.Datas ...@@ -967,7 +955,7 @@ test_that("lgb.train() throws an informative error if 'data' is not an lgb.Datas
params = list( params = list(
objective = "regression" objective = "regression"
, metric = "l2,l1" , metric = "l2,l1"
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
) )
, data = val , data = val
, 10L , 10L
...@@ -986,7 +974,7 @@ test_that("lgb.train() throws an informative error if 'valids' is not a list of ...@@ -986,7 +974,7 @@ test_that("lgb.train() throws an informative error if 'valids' is not a list of
params = list( params = list(
objective = "regression" objective = "regression"
, metric = "l2,l1" , metric = "l2,l1"
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
) )
, data = lgb.Dataset(train$data, label = train$label) , data = lgb.Dataset(train$data, label = train$label)
, 10L , 10L
...@@ -1005,7 +993,7 @@ test_that("lgb.train() errors if 'valids' is a list of lgb.Dataset objects but s ...@@ -1005,7 +993,7 @@ test_that("lgb.train() errors if 'valids' is a list of lgb.Dataset objects but s
params = list( params = list(
objective = "regression" objective = "regression"
, metric = "l2,l1" , metric = "l2,l1"
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
) )
, data = lgb.Dataset(train$data, label = train$label) , data = lgb.Dataset(train$data, label = train$label)
, 10L , 10L
...@@ -1024,7 +1012,7 @@ test_that("lgb.train() throws an informative error if 'valids' contains lgb.Data ...@@ -1024,7 +1012,7 @@ test_that("lgb.train() throws an informative error if 'valids' contains lgb.Data
params = list( params = list(
objective = "regression" objective = "regression"
, metric = "l2,l1" , metric = "l2,l1"
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
) )
, data = lgb.Dataset(train$data, label = train$label) , data = lgb.Dataset(train$data, label = train$label)
, 10L , 10L
...@@ -1045,7 +1033,7 @@ test_that("lgb.train() works with force_col_wise and force_row_wise", { ...@@ -1045,7 +1033,7 @@ test_that("lgb.train() works with force_col_wise and force_row_wise", {
objective = "binary" objective = "binary"
, metric = "binary_error" , metric = "binary_error"
, force_col_wise = TRUE , force_col_wise = TRUE
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
bst_col_wise <- lgb.train( bst_col_wise <- lgb.train(
...@@ -1058,7 +1046,7 @@ test_that("lgb.train() works with force_col_wise and force_row_wise", { ...@@ -1058,7 +1046,7 @@ test_that("lgb.train() works with force_col_wise and force_row_wise", {
objective = "binary" objective = "binary"
, metric = "binary_error" , metric = "binary_error"
, force_row_wise = TRUE , force_row_wise = TRUE
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
bst_row_wise <- lgb.train( bst_row_wise <- lgb.train(
...@@ -1099,7 +1087,7 @@ test_that("lgb.train() works as expected with sparse features", { ...@@ -1099,7 +1087,7 @@ test_that("lgb.train() works as expected with sparse features", {
objective = "binary" objective = "binary"
, min_data = 1L , min_data = 1L
, min_data_in_bin = 1L , min_data_in_bin = 1L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, data = dtrain , data = dtrain
...@@ -1112,7 +1100,7 @@ test_that("lgb.train() works as expected with sparse features", { ...@@ -1112,7 +1100,7 @@ test_that("lgb.train() works as expected with sparse features", {
expect_equal(parsed_model$objective, "binary sigmoid:1") expect_equal(parsed_model$objective, "binary sigmoid:1")
expect_false(parsed_model$average_output) expect_false(parsed_model$average_output)
expected_error <- 0.6931268 expected_error <- 0.6931268
expect_true(abs(bst$eval_train()[[1L]][["value"]] - expected_error) < TOLERANCE) expect_true(abs(bst$eval_train()[[1L]][["value"]] - expected_error) < .LGB_NUMERIC_TOLERANCE)
}) })
test_that("lgb.train() works with early stopping for classification", { test_that("lgb.train() works with early stopping for classification", {
...@@ -1143,7 +1131,7 @@ test_that("lgb.train() works with early stopping for classification", { ...@@ -1143,7 +1131,7 @@ test_that("lgb.train() works with early stopping for classification", {
params = list( params = list(
objective = "binary" objective = "binary"
, metric = "binary_error" , metric = "binary_error"
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, data = dtrain , data = dtrain
...@@ -1168,7 +1156,7 @@ test_that("lgb.train() works with early stopping for classification", { ...@@ -1168,7 +1156,7 @@ test_that("lgb.train() works with early stopping for classification", {
objective = "binary" objective = "binary"
, metric = "binary_error" , metric = "binary_error"
, early_stopping_rounds = early_stopping_rounds , early_stopping_rounds = early_stopping_rounds
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, data = dtrain , data = dtrain
...@@ -1220,7 +1208,7 @@ test_that("lgb.train() treats early_stopping_rounds<=0 as disabling early stoppi ...@@ -1220,7 +1208,7 @@ test_that("lgb.train() treats early_stopping_rounds<=0 as disabling early stoppi
params = list( params = list(
objective = "binary" objective = "binary"
, metric = "binary_error" , metric = "binary_error"
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, data = dtrain , data = dtrain
...@@ -1245,7 +1233,7 @@ test_that("lgb.train() treats early_stopping_rounds<=0 as disabling early stoppi ...@@ -1245,7 +1233,7 @@ test_that("lgb.train() treats early_stopping_rounds<=0 as disabling early stoppi
objective = "binary" objective = "binary"
, metric = "binary_error" , metric = "binary_error"
, n_iter_no_change = value , n_iter_no_change = value
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, data = dtrain , data = dtrain
...@@ -1288,7 +1276,7 @@ test_that("lgb.train() works with early stopping for classification with a metri ...@@ -1288,7 +1276,7 @@ test_that("lgb.train() works with early stopping for classification with a metri
, metric = "auc" , metric = "auc"
, max_depth = 3L , max_depth = 3L
, early_stopping_rounds = early_stopping_rounds , early_stopping_rounds = early_stopping_rounds
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, data = dtrain , data = dtrain
...@@ -1303,7 +1291,7 @@ test_that("lgb.train() works with early stopping for classification with a metri ...@@ -1303,7 +1291,7 @@ test_that("lgb.train() works with early stopping for classification with a metri
, metric = "binary_error" , metric = "binary_error"
, max_depth = 3L , max_depth = 3L
, early_stopping_rounds = early_stopping_rounds , early_stopping_rounds = early_stopping_rounds
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, data = dtrain , data = dtrain
...@@ -1322,7 +1310,7 @@ test_that("lgb.train() works with early stopping for classification with a metri ...@@ -1322,7 +1310,7 @@ test_that("lgb.train() works with early stopping for classification with a metri
) )
expect_identical(bst_binary_error$best_iter, 1L) expect_identical(bst_binary_error$best_iter, 1L)
expect_identical(bst_binary_error$current_iter(), early_stopping_rounds + 1L) expect_identical(bst_binary_error$current_iter(), early_stopping_rounds + 1L)
expect_true(abs(bst_binary_error$best_score - 0.01613904) < TOLERANCE) expect_true(abs(bst_binary_error$best_score - 0.01613904) < .LGB_NUMERIC_TOLERANCE)
# early stopping should not have been hit for AUC (higher_better = TRUE) # early stopping should not have been hit for AUC (higher_better = TRUE)
eval_info <- bst_auc$.__enclos_env__$private$get_eval_info() eval_info <- bst_auc$.__enclos_env__$private$get_eval_info()
...@@ -1333,7 +1321,7 @@ test_that("lgb.train() works with early stopping for classification with a metri ...@@ -1333,7 +1321,7 @@ test_that("lgb.train() works with early stopping for classification with a metri
) )
expect_identical(bst_auc$best_iter, 9L) expect_identical(bst_auc$best_iter, 9L)
expect_identical(bst_auc$current_iter(), nrounds) expect_identical(bst_auc$current_iter(), nrounds)
expect_true(abs(bst_auc$best_score - 0.9999969) < TOLERANCE) expect_true(abs(bst_auc$best_score - 0.9999969) < .LGB_NUMERIC_TOLERANCE)
}) })
test_that("lgb.train() works with early stopping for regression", { test_that("lgb.train() works with early stopping for regression", {
...@@ -1365,7 +1353,7 @@ test_that("lgb.train() works with early stopping for regression", { ...@@ -1365,7 +1353,7 @@ test_that("lgb.train() works with early stopping for regression", {
params = list( params = list(
objective = "regression" objective = "regression"
, metric = "rmse" , metric = "rmse"
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, data = dtrain , data = dtrain
...@@ -1390,7 +1378,7 @@ test_that("lgb.train() works with early stopping for regression", { ...@@ -1390,7 +1378,7 @@ test_that("lgb.train() works with early stopping for regression", {
objective = "regression" objective = "regression"
, metric = "rmse" , metric = "rmse"
, early_stopping_rounds = early_stopping_rounds , early_stopping_rounds = early_stopping_rounds
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, data = dtrain , data = dtrain
...@@ -1426,7 +1414,7 @@ test_that("lgb.train() does not stop early if early_stopping_rounds is not given ...@@ -1426,7 +1414,7 @@ test_that("lgb.train() does not stop early if early_stopping_rounds is not given
params = list( params = list(
objective = "regression" objective = "regression"
, metric = "None" , metric = "None"
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, data = DTRAIN_RANDOM_REGRESSION , data = DTRAIN_RANDOM_REGRESSION
...@@ -1471,7 +1459,7 @@ test_that("If first_metric_only is not given or is FALSE, lgb.train() decides to ...@@ -1471,7 +1459,7 @@ test_that("If first_metric_only is not given or is FALSE, lgb.train() decides to
objective = "regression" objective = "regression"
, metric = "None" , metric = "None"
, early_stopping_rounds = early_stopping_rounds , early_stopping_rounds = early_stopping_rounds
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, list( , list(
...@@ -1479,7 +1467,7 @@ test_that("If first_metric_only is not given or is FALSE, lgb.train() decides to ...@@ -1479,7 +1467,7 @@ test_that("If first_metric_only is not given or is FALSE, lgb.train() decides to
, metric = "None" , metric = "None"
, early_stopping_rounds = early_stopping_rounds , early_stopping_rounds = early_stopping_rounds
, first_metric_only = FALSE , first_metric_only = FALSE
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
) )
...@@ -1543,7 +1531,7 @@ test_that("If first_metric_only is TRUE, lgb.train() decides to stop early based ...@@ -1543,7 +1531,7 @@ test_that("If first_metric_only is TRUE, lgb.train() decides to stop early based
, metric = "None" , metric = "None"
, early_stopping_rounds = early_stopping_rounds , early_stopping_rounds = early_stopping_rounds
, first_metric_only = TRUE , first_metric_only = TRUE
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, data = DTRAIN_RANDOM_REGRESSION , data = DTRAIN_RANDOM_REGRESSION
...@@ -1590,7 +1578,7 @@ test_that("lgb.train() works when a mixture of functions and strings are passed ...@@ -1590,7 +1578,7 @@ test_that("lgb.train() works when a mixture of functions and strings are passed
params = list( params = list(
objective = "regression" objective = "regression"
, metric = "None" , metric = "None"
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, data = DTRAIN_RANDOM_REGRESSION , data = DTRAIN_RANDOM_REGRESSION
...@@ -1616,15 +1604,15 @@ test_that("lgb.train() works when a mixture of functions and strings are passed ...@@ -1616,15 +1604,15 @@ test_that("lgb.train() works when a mixture of functions and strings are passed
# the difference metrics shouldn't have been mixed up with each other # the difference metrics shouldn't have been mixed up with each other
results <- bst$record_evals[["valid1"]] results <- bst$record_evals[["valid1"]]
expect_true(abs(results[["rmse"]][["eval"]][[1L]] - 1.105012) < TOLERANCE) expect_true(abs(results[["rmse"]][["eval"]][[1L]] - 1.105012) < .LGB_NUMERIC_TOLERANCE)
expect_true(abs(results[["l2"]][["eval"]][[1L]] - 1.221051) < TOLERANCE) expect_true(abs(results[["l2"]][["eval"]][[1L]] - 1.221051) < .LGB_NUMERIC_TOLERANCE)
expected_increasing_metric <- increasing_metric_starting_value + 0.1 expected_increasing_metric <- increasing_metric_starting_value + 0.1
expect_true( expect_true(
abs( abs(
results[["increasing_metric"]][["eval"]][[1L]] - expected_increasing_metric results[["increasing_metric"]][["eval"]][[1L]] - expected_increasing_metric
) < TOLERANCE ) < .LGB_NUMERIC_TOLERANCE
) )
expect_true(abs(results[["constant_metric"]][["eval"]][[1L]] - CONSTANT_METRIC_VALUE) < TOLERANCE) expect_true(abs(results[["constant_metric"]][["eval"]][[1L]] - CONSTANT_METRIC_VALUE) < .LGB_NUMERIC_TOLERANCE)
}) })
...@@ -1647,7 +1635,7 @@ test_that("lgb.train() works when a list of strings or a character vector is pas ...@@ -1647,7 +1635,7 @@ test_that("lgb.train() works when a list of strings or a character vector is pas
params = list( params = list(
objective = "binary" objective = "binary"
, metric = "None" , metric = "None"
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, data = DTRAIN_RANDOM_CLASSIFICATION , data = DTRAIN_RANDOM_CLASSIFICATION
...@@ -1669,10 +1657,10 @@ test_that("lgb.train() works when a list of strings or a character vector is pas ...@@ -1669,10 +1657,10 @@ test_that("lgb.train() works when a list of strings or a character vector is pas
# the difference metrics shouldn't have been mixed up with each other # the difference metrics shouldn't have been mixed up with each other
results <- bst$record_evals[["valid1"]] results <- bst$record_evals[["valid1"]]
if ("binary_error" %in% unlist(eval_variation)) { if ("binary_error" %in% unlist(eval_variation)) {
expect_true(abs(results[["binary_error"]][["eval"]][[1L]] - 0.4864865) < TOLERANCE) expect_true(abs(results[["binary_error"]][["eval"]][[1L]] - 0.4864865) < .LGB_NUMERIC_TOLERANCE)
} }
if ("binary_logloss" %in% unlist(eval_variation)) { if ("binary_logloss" %in% unlist(eval_variation)) {
expect_true(abs(results[["binary_logloss"]][["eval"]][[1L]] - 0.6932548) < TOLERANCE) expect_true(abs(results[["binary_logloss"]][["eval"]][[1L]] - 0.6932548) < .LGB_NUMERIC_TOLERANCE)
} }
} }
}) })
...@@ -1685,7 +1673,7 @@ test_that("lgb.train() works when you specify both 'metric' and 'eval' with stri ...@@ -1685,7 +1673,7 @@ test_that("lgb.train() works when you specify both 'metric' and 'eval' with stri
params = list( params = list(
objective = "binary" objective = "binary"
, metric = "binary_error" , metric = "binary_error"
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, data = DTRAIN_RANDOM_CLASSIFICATION , data = DTRAIN_RANDOM_CLASSIFICATION
...@@ -1706,8 +1694,8 @@ test_that("lgb.train() works when you specify both 'metric' and 'eval' with stri ...@@ -1706,8 +1694,8 @@ test_that("lgb.train() works when you specify both 'metric' and 'eval' with stri
# the difference metrics shouldn't have been mixed up with each other # the difference metrics shouldn't have been mixed up with each other
results <- bst$record_evals[["valid1"]] results <- bst$record_evals[["valid1"]]
expect_true(abs(results[["binary_error"]][["eval"]][[1L]] - 0.4864865) < TOLERANCE) expect_true(abs(results[["binary_error"]][["eval"]][[1L]] - 0.4864865) < .LGB_NUMERIC_TOLERANCE)
expect_true(abs(results[["binary_logloss"]][["eval"]][[1L]] - 0.6932548) < TOLERANCE) expect_true(abs(results[["binary_logloss"]][["eval"]][[1L]] - 0.6932548) < .LGB_NUMERIC_TOLERANCE)
}) })
test_that("lgb.train() works when you give a function for eval", { test_that("lgb.train() works when you give a function for eval", {
...@@ -1718,7 +1706,7 @@ test_that("lgb.train() works when you give a function for eval", { ...@@ -1718,7 +1706,7 @@ test_that("lgb.train() works when you give a function for eval", {
params = list( params = list(
objective = "binary" objective = "binary"
, metric = "None" , metric = "None"
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, data = DTRAIN_RANDOM_CLASSIFICATION , data = DTRAIN_RANDOM_CLASSIFICATION
...@@ -1731,7 +1719,7 @@ test_that("lgb.train() works when you give a function for eval", { ...@@ -1731,7 +1719,7 @@ test_that("lgb.train() works when you give a function for eval", {
# the difference metrics shouldn't have been mixed up with each other # the difference metrics shouldn't have been mixed up with each other
results <- bst$record_evals[["valid1"]] results <- bst$record_evals[["valid1"]]
expect_true(abs(results[["constant_metric"]][["eval"]][[1L]] - CONSTANT_METRIC_VALUE) < TOLERANCE) expect_true(abs(results[["constant_metric"]][["eval"]][[1L]] - CONSTANT_METRIC_VALUE) < .LGB_NUMERIC_TOLERANCE)
}) })
test_that("lgb.train() works with early stopping for regression with a metric that should be minimized", { test_that("lgb.train() works with early stopping for regression with a metric that should be minimized", {
...@@ -1770,7 +1758,7 @@ test_that("lgb.train() works with early stopping for regression with a metric th ...@@ -1770,7 +1758,7 @@ test_that("lgb.train() works with early stopping for regression with a metric th
) )
, min_data_in_bin = 5L , min_data_in_bin = 5L
, early_stopping_rounds = early_stopping_rounds , early_stopping_rounds = early_stopping_rounds
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, data = dtrain , data = dtrain
...@@ -1823,7 +1811,7 @@ test_that("lgb.train() supports non-ASCII feature names", { ...@@ -1823,7 +1811,7 @@ test_that("lgb.train() supports non-ASCII feature names", {
, obj = "regression" , obj = "regression"
, params = list( , params = list(
metric = "rmse" metric = "rmse"
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, colnames = feature_names , colnames = feature_names
...@@ -1834,7 +1822,7 @@ test_that("lgb.train() supports non-ASCII feature names", { ...@@ -1834,7 +1822,7 @@ test_that("lgb.train() supports non-ASCII feature names", {
# UTF-8 strings are not well-supported on Windows # UTF-8 strings are not well-supported on Windows
# * https://developer.r-project.org/Blog/public/2020/05/02/utf-8-support-on-windows/ # * https://developer.r-project.org/Blog/public/2020/05/02/utf-8-support-on-windows/
# * https://developer.r-project.org/Blog/public/2020/07/30/windows/utf-8-build-of-r-and-cran-packages/index.html # * https://developer.r-project.org/Blog/public/2020/07/30/windows/utf-8-build-of-r-and-cran-packages/index.html
if (UTF8_LOCALE && !ON_WINDOWS) { if (.LGB_UTF8_LOCALE && !.LGB_ON_WINDOWS) {
expect_identical( expect_identical(
dumped_model[["feature_names"]] dumped_model[["feature_names"]]
, feature_names , feature_names
...@@ -1864,7 +1852,7 @@ test_that("lgb.train() works with integer, double, and numeric data", { ...@@ -1864,7 +1852,7 @@ test_that("lgb.train() works with integer, double, and numeric data", {
, min_data_in_leaf = 1L , min_data_in_leaf = 1L
, learning_rate = 0.01 , learning_rate = 0.01
, seed = 708L , seed = 708L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
) )
, nrounds = nrounds , nrounds = nrounds
) )
...@@ -1877,7 +1865,7 @@ test_that("lgb.train() works with integer, double, and numeric data", { ...@@ -1877,7 +1865,7 @@ test_that("lgb.train() works with integer, double, and numeric data", {
# should have achieved expected performance # should have achieved expected performance
preds <- predict(bst, X) preds <- predict(bst, X)
mae <- mean(abs(y - preds)) mae <- mean(abs(y - preds))
expect_true(abs(mae - expected_mae) < TOLERANCE) expect_true(abs(mae - expected_mae) < .LGB_NUMERIC_TOLERANCE)
} }
}) })
...@@ -1969,7 +1957,7 @@ test_that("when early stopping is not activated, best_iter and best_score come f ...@@ -1969,7 +1957,7 @@ test_that("when early stopping is not activated, best_iter and best_score come f
, metric = "rmse" , metric = "rmse"
, learning_rate = 1.5 , learning_rate = 1.5
, num_leaves = 5L , num_leaves = 5L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
...@@ -2132,7 +2120,7 @@ test_that("lightgbm.train() gives the correct best_score and best_iter for a met ...@@ -2132,7 +2120,7 @@ test_that("lightgbm.train() gives the correct best_score and best_iter for a met
, metric = "auc" , metric = "auc"
, learning_rate = 1.5 , learning_rate = 1.5
, num_leaves = 5L , num_leaves = 5L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
) )
...@@ -2218,7 +2206,7 @@ test_that("lgb.cv() works when you specify both 'metric' and 'eval' with strings ...@@ -2218,7 +2206,7 @@ test_that("lgb.cv() works when you specify both 'metric' and 'eval' with strings
params = list( params = list(
objective = "binary" objective = "binary"
, metric = "binary_error" , metric = "binary_error"
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, data = DTRAIN_RANDOM_CLASSIFICATION , data = DTRAIN_RANDOM_CLASSIFICATION
...@@ -2237,8 +2225,8 @@ test_that("lgb.cv() works when you specify both 'metric' and 'eval' with strings ...@@ -2237,8 +2225,8 @@ test_that("lgb.cv() works when you specify both 'metric' and 'eval' with strings
# the difference metrics shouldn't have been mixed up with each other # the difference metrics shouldn't have been mixed up with each other
results <- bst$record_evals[["valid"]] results <- bst$record_evals[["valid"]]
expect_true(abs(results[["binary_error"]][["eval"]][[1L]] - 0.5005654) < TOLERANCE) expect_true(abs(results[["binary_error"]][["eval"]][[1L]] - 0.5005654) < .LGB_NUMERIC_TOLERANCE)
expect_true(abs(results[["binary_logloss"]][["eval"]][[1L]] - 0.7011232) < TOLERANCE) expect_true(abs(results[["binary_logloss"]][["eval"]][[1L]] - 0.7011232) < .LGB_NUMERIC_TOLERANCE)
# all boosters should have been created # all boosters should have been created
expect_length(bst$boosters, nfolds) expect_length(bst$boosters, nfolds)
...@@ -2253,7 +2241,7 @@ test_that("lgb.cv() works when you give a function for eval", { ...@@ -2253,7 +2241,7 @@ test_that("lgb.cv() works when you give a function for eval", {
params = list( params = list(
objective = "binary" objective = "binary"
, metric = "None" , metric = "None"
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, data = DTRAIN_RANDOM_CLASSIFICATION , data = DTRAIN_RANDOM_CLASSIFICATION
...@@ -2264,7 +2252,7 @@ test_that("lgb.cv() works when you give a function for eval", { ...@@ -2264,7 +2252,7 @@ test_that("lgb.cv() works when you give a function for eval", {
# the difference metrics shouldn't have been mixed up with each other # the difference metrics shouldn't have been mixed up with each other
results <- bst$record_evals[["valid"]] results <- bst$record_evals[["valid"]]
expect_true(abs(results[["constant_metric"]][["eval"]][[1L]] - CONSTANT_METRIC_VALUE) < TOLERANCE) expect_true(abs(results[["constant_metric"]][["eval"]][[1L]] - CONSTANT_METRIC_VALUE) < .LGB_NUMERIC_TOLERANCE)
expect_named(results, "constant_metric") expect_named(results, "constant_metric")
}) })
...@@ -2280,7 +2268,7 @@ test_that("If first_metric_only is TRUE, lgb.cv() decides to stop early based on ...@@ -2280,7 +2268,7 @@ test_that("If first_metric_only is TRUE, lgb.cv() decides to stop early based on
, metric = "None" , metric = "None"
, early_stopping_rounds = early_stopping_rounds , early_stopping_rounds = early_stopping_rounds
, first_metric_only = TRUE , first_metric_only = TRUE
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, data = DTRAIN_RANDOM_REGRESSION , data = DTRAIN_RANDOM_REGRESSION
...@@ -2338,7 +2326,7 @@ test_that("early stopping works with lgb.cv()", { ...@@ -2338,7 +2326,7 @@ test_that("early stopping works with lgb.cv()", {
, metric = "None" , metric = "None"
, early_stopping_rounds = early_stopping_rounds , early_stopping_rounds = early_stopping_rounds
, first_metric_only = TRUE , first_metric_only = TRUE
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, data = DTRAIN_RANDOM_REGRESSION , data = DTRAIN_RANDOM_REGRESSION
...@@ -2520,7 +2508,7 @@ test_that("lgb.train() fit on linearly-relatead data improves when using linear ...@@ -2520,7 +2508,7 @@ test_that("lgb.train() fit on linearly-relatead data improves when using linear
params <- list( params <- list(
objective = "regression" objective = "regression"
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, metric = "mse" , metric = "mse"
, seed = 0L , seed = 0L
, num_leaves = 2L , num_leaves = 2L
...@@ -2555,7 +2543,7 @@ test_that("lgb.train() with linear learner fails already-constructed dataset wit ...@@ -2555,7 +2543,7 @@ test_that("lgb.train() with linear learner fails already-constructed dataset wit
set.seed(708L) set.seed(708L)
params <- list( params <- list(
objective = "regression" objective = "regression"
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, metric = "mse" , metric = "mse"
, seed = 0L , seed = 0L
, num_leaves = 2L , num_leaves = 2L
...@@ -2597,7 +2585,7 @@ test_that("lgb.train() works with linear learners even if Dataset has missing va ...@@ -2597,7 +2585,7 @@ test_that("lgb.train() works with linear learners even if Dataset has missing va
params <- list( params <- list(
objective = "regression" objective = "regression"
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, metric = "mse" , metric = "mse"
, seed = 0L , seed = 0L
, num_leaves = 2L , num_leaves = 2L
...@@ -2645,7 +2633,7 @@ test_that("lgb.train() works with linear learners, bagging, and a Dataset that h ...@@ -2645,7 +2633,7 @@ test_that("lgb.train() works with linear learners, bagging, and a Dataset that h
params <- list( params <- list(
objective = "regression" objective = "regression"
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, metric = "mse" , metric = "mse"
, seed = 0L , seed = 0L
, num_leaves = 2L , num_leaves = 2L
...@@ -2806,7 +2794,7 @@ test_that(paste0("lgb.train() gives same result when interaction_constraints is ...@@ -2806,7 +2794,7 @@ test_that(paste0("lgb.train() gives same result when interaction_constraints is
params <- list( params <- list(
objective = "regression" objective = "regression"
, interaction_constraints = list(c(1L, 2L), 3L) , interaction_constraints = list(c(1L, 2L), 3L)
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
bst <- lightgbm( bst <- lightgbm(
...@@ -2820,7 +2808,7 @@ test_that(paste0("lgb.train() gives same result when interaction_constraints is ...@@ -2820,7 +2808,7 @@ test_that(paste0("lgb.train() gives same result when interaction_constraints is
params <- list( params <- list(
objective = "regression" objective = "regression"
, interaction_constraints = list(c(cnames[[1L]], cnames[[2L]]), cnames[[3L]]) , interaction_constraints = list(c(cnames[[1L]], cnames[[2L]]), cnames[[3L]])
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
bst <- lightgbm( bst <- lightgbm(
...@@ -2833,7 +2821,7 @@ test_that(paste0("lgb.train() gives same result when interaction_constraints is ...@@ -2833,7 +2821,7 @@ test_that(paste0("lgb.train() gives same result when interaction_constraints is
params <- list( params <- list(
objective = "regression" objective = "regression"
, interaction_constraints = list(c(cnames[[1L]], cnames[[2L]]), 3L) , interaction_constraints = list(c(cnames[[1L]], cnames[[2L]]), 3L)
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
bst <- lightgbm( bst <- lightgbm(
...@@ -2855,7 +2843,7 @@ test_that(paste0("lgb.train() gives same results when using interaction_constrai ...@@ -2855,7 +2843,7 @@ test_that(paste0("lgb.train() gives same results when using interaction_constrai
params <- list( params <- list(
objective = "regression" objective = "regression"
, interaction_constraints = list(c(1L, 2L), 3L) , interaction_constraints = list(c(1L, 2L), 3L)
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
bst <- lightgbm( bst <- lightgbm(
...@@ -2869,7 +2857,7 @@ test_that(paste0("lgb.train() gives same results when using interaction_constrai ...@@ -2869,7 +2857,7 @@ test_that(paste0("lgb.train() gives same results when using interaction_constrai
params <- list( params <- list(
objective = "regression" objective = "regression"
, interaction_constraints = list(c(new_colnames[1L], new_colnames[2L]), new_colnames[3L]) , interaction_constraints = list(c(new_colnames[1L], new_colnames[2L]), new_colnames[3L])
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
bst <- lightgbm( bst <- lightgbm(
...@@ -3015,7 +3003,7 @@ for (x3_to_categorical in c(TRUE, FALSE)) { ...@@ -3015,7 +3003,7 @@ for (x3_to_categorical in c(TRUE, FALSE)) {
, monotone_constraints = c(1L, -1L, 0L) , monotone_constraints = c(1L, -1L, 0L)
, monotone_constraints_method = monotone_constraints_method , monotone_constraints_method = monotone_constraints_method
, use_missing = FALSE , use_missing = FALSE
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
constrained_model <- lgb.train( constrained_model <- lgb.train(
...@@ -3040,7 +3028,7 @@ test_that("lightgbm() accepts objective as function argument and under params", ...@@ -3040,7 +3028,7 @@ test_that("lightgbm() accepts objective as function argument and under params",
, label = train$label , label = train$label
, params = list(objective = "regression_l1", num_threads = .LGB_MAX_THREADS) , params = list(objective = "regression_l1", num_threads = .LGB_MAX_THREADS)
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
) )
expect_equal(bst1$params$objective, "regression_l1") expect_equal(bst1$params$objective, "regression_l1")
model_txt_lines <- strsplit( model_txt_lines <- strsplit(
...@@ -3056,7 +3044,7 @@ test_that("lightgbm() accepts objective as function argument and under params", ...@@ -3056,7 +3044,7 @@ test_that("lightgbm() accepts objective as function argument and under params",
, label = train$label , label = train$label
, objective = "regression_l1" , objective = "regression_l1"
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
) )
expect_equal(bst2$params$objective, "regression_l1") expect_equal(bst2$params$objective, "regression_l1")
model_txt_lines <- strsplit( model_txt_lines <- strsplit(
...@@ -3075,7 +3063,7 @@ test_that("lightgbm() prioritizes objective under params over objective as funct ...@@ -3075,7 +3063,7 @@ test_that("lightgbm() prioritizes objective under params over objective as funct
, objective = "regression" , objective = "regression"
, params = list(objective = "regression_l1", num_threads = .LGB_MAX_THREADS) , params = list(objective = "regression_l1", num_threads = .LGB_MAX_THREADS)
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
) )
expect_equal(bst1$params$objective, "regression_l1") expect_equal(bst1$params$objective, "regression_l1")
model_txt_lines <- strsplit( model_txt_lines <- strsplit(
...@@ -3092,7 +3080,7 @@ test_that("lightgbm() prioritizes objective under params over objective as funct ...@@ -3092,7 +3080,7 @@ test_that("lightgbm() prioritizes objective under params over objective as funct
, objective = "regression" , objective = "regression"
, params = list(loss = "regression_l1", num_threads = .LGB_MAX_THREADS) , params = list(loss = "regression_l1", num_threads = .LGB_MAX_THREADS)
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
) )
expect_equal(bst2$params$objective, "regression_l1") expect_equal(bst2$params$objective, "regression_l1")
model_txt_lines <- strsplit( model_txt_lines <- strsplit(
...@@ -3110,7 +3098,7 @@ test_that("lightgbm() accepts init_score as function argument", { ...@@ -3110,7 +3098,7 @@ test_that("lightgbm() accepts init_score as function argument", {
, label = train$label , label = train$label
, objective = "binary" , objective = "binary"
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, params = list(num_threads = .LGB_MAX_THREADS) , params = list(num_threads = .LGB_MAX_THREADS)
) )
pred1 <- predict(bst1, train$data, type = "raw") pred1 <- predict(bst1, train$data, type = "raw")
...@@ -3121,7 +3109,7 @@ test_that("lightgbm() accepts init_score as function argument", { ...@@ -3121,7 +3109,7 @@ test_that("lightgbm() accepts init_score as function argument", {
, init_score = pred1 , init_score = pred1
, objective = "binary" , objective = "binary"
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, params = list(num_threads = .LGB_MAX_THREADS) , params = list(num_threads = .LGB_MAX_THREADS)
) )
pred2 <- predict(bst2, train$data, type = "raw") pred2 <- predict(bst2, train$data, type = "raw")
...@@ -3134,7 +3122,7 @@ test_that("lightgbm() defaults to 'regression' objective if objective not otherw ...@@ -3134,7 +3122,7 @@ test_that("lightgbm() defaults to 'regression' objective if objective not otherw
data = train$data data = train$data
, label = train$label , label = train$label
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, params = list(num_threads = .LGB_MAX_THREADS) , params = list(num_threads = .LGB_MAX_THREADS)
) )
expect_equal(bst$params$objective, "regression") expect_equal(bst$params$objective, "regression")
...@@ -3152,7 +3140,7 @@ test_that("lightgbm() accepts 'num_threads' as either top-level argument or unde ...@@ -3152,7 +3140,7 @@ test_that("lightgbm() accepts 'num_threads' as either top-level argument or unde
data = train$data data = train$data
, label = train$label , label = train$label
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = 1L , num_threads = 1L
) )
expect_equal(bst$params$num_threads, 1L) expect_equal(bst$params$num_threads, 1L)
...@@ -3167,7 +3155,7 @@ test_that("lightgbm() accepts 'num_threads' as either top-level argument or unde ...@@ -3167,7 +3155,7 @@ test_that("lightgbm() accepts 'num_threads' as either top-level argument or unde
data = train$data data = train$data
, label = train$label , label = train$label
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, params = list(num_threads = 1L) , params = list(num_threads = 1L)
) )
expect_equal(bst$params$num_threads, 1L) expect_equal(bst$params$num_threads, 1L)
...@@ -3182,7 +3170,7 @@ test_that("lightgbm() accepts 'num_threads' as either top-level argument or unde ...@@ -3182,7 +3170,7 @@ test_that("lightgbm() accepts 'num_threads' as either top-level argument or unde
data = train$data data = train$data
, label = train$label , label = train$label
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = 10L , num_threads = 10L
, params = list(num_threads = 1L) , params = list(num_threads = 1L)
) )
...@@ -3206,7 +3194,7 @@ test_that("lightgbm() accepts 'weight' and 'weights'", { ...@@ -3206,7 +3194,7 @@ test_that("lightgbm() accepts 'weight' and 'weights'", {
, weights = w , weights = w
, obj = "regression" , obj = "regression"
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, params = list( , params = list(
min_data_in_bin = 1L min_data_in_bin = 1L
, min_data_in_leaf = 1L , min_data_in_leaf = 1L
...@@ -3262,7 +3250,7 @@ test_that("lightgbm() accepts 'weight' and 'weights'", { ...@@ -3262,7 +3250,7 @@ test_that("lightgbm() accepts 'weight' and 'weights'", {
expect_equal( expect_equal(
object = unlist(record_evals[["valid"]][["auc"]][["eval"]]) object = unlist(record_evals[["valid"]][["auc"]][["eval"]])
, expected = expected_valid_auc , expected = expected_valid_auc
, tolerance = TOLERANCE , tolerance = .LGB_NUMERIC_TOLERANCE
) )
expect_named(record_evals, c("start_iter", "valid"), ignore.order = TRUE, ignore.case = FALSE) expect_named(record_evals, c("start_iter", "valid"), ignore.order = TRUE, ignore.case = FALSE)
expect_equal(record_evals[["valid"]][["auc"]][["eval_err"]], list()) expect_equal(record_evals[["valid"]][["auc"]][["eval_err"]], list())
...@@ -3667,7 +3655,7 @@ test_that("lightgbm() changes objective='auto' appropriately", { ...@@ -3667,7 +3655,7 @@ test_that("lightgbm() changes objective='auto' appropriately", {
data("mtcars") data("mtcars")
y <- mtcars$mpg y <- mtcars$mpg
x <- as.matrix(mtcars[, -1L]) x <- as.matrix(mtcars[, -1L])
model <- lightgbm(x, y, objective = "auto", verbose = VERBOSITY, nrounds = 5L, num_threads = .LGB_MAX_THREADS) model <- lightgbm(x, y, objective = "auto", verbose = .LGB_VERBOSITY, nrounds = 5L, num_threads = .LGB_MAX_THREADS)
expect_equal(model$params$objective, "regression") expect_equal(model$params$objective, "regression")
model_txt_lines <- strsplit( model_txt_lines <- strsplit(
x = model$save_model_to_string() x = model$save_model_to_string()
...@@ -3680,7 +3668,7 @@ test_that("lightgbm() changes objective='auto' appropriately", { ...@@ -3680,7 +3668,7 @@ test_that("lightgbm() changes objective='auto' appropriately", {
# Binary classification # Binary classification
x <- train$data x <- train$data
y <- factor(train$label) y <- factor(train$label)
model <- lightgbm(x, y, objective = "auto", verbose = VERBOSITY, nrounds = 5L, num_threads = .LGB_MAX_THREADS) model <- lightgbm(x, y, objective = "auto", verbose = .LGB_VERBOSITY, nrounds = 5L, num_threads = .LGB_MAX_THREADS)
expect_equal(model$params$objective, "binary") expect_equal(model$params$objective, "binary")
model_txt_lines <- strsplit( model_txt_lines <- strsplit(
x = model$save_model_to_string() x = model$save_model_to_string()
...@@ -3693,7 +3681,7 @@ test_that("lightgbm() changes objective='auto' appropriately", { ...@@ -3693,7 +3681,7 @@ test_that("lightgbm() changes objective='auto' appropriately", {
data("iris") data("iris")
y <- factor(iris$Species) y <- factor(iris$Species)
x <- as.matrix(iris[, -5L]) x <- as.matrix(iris[, -5L])
model <- lightgbm(x, y, objective = "auto", verbose = VERBOSITY, nrounds = 5L, num_threads = .LGB_MAX_THREADS) model <- lightgbm(x, y, objective = "auto", verbose = .LGB_VERBOSITY, nrounds = 5L, num_threads = .LGB_MAX_THREADS)
expect_equal(model$params$objective, "multiclass") expect_equal(model$params$objective, "multiclass")
expect_equal(model$params$num_class, 3L) expect_equal(model$params$num_class, 3L)
model_txt_lines <- strsplit( model_txt_lines <- strsplit(
...@@ -3712,7 +3700,7 @@ test_that("lightgbm() determines number of classes for non-default multiclass ob ...@@ -3712,7 +3700,7 @@ test_that("lightgbm() determines number of classes for non-default multiclass ob
x x
, y , y
, objective = "multiclassova" , objective = "multiclassova"
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, nrounds = 5L , nrounds = 5L
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
...@@ -3731,7 +3719,7 @@ test_that("lightgbm() doesn't accept binary classification with non-binary facto ...@@ -3731,7 +3719,7 @@ test_that("lightgbm() doesn't accept binary classification with non-binary facto
y <- factor(iris$Species) y <- factor(iris$Species)
x <- as.matrix(iris[, -5L]) x <- as.matrix(iris[, -5L])
expect_error({ expect_error({
lightgbm(x, y, objective = "binary", verbose = VERBOSITY, nrounds = 5L, num_threads = .LGB_MAX_THREADS) lightgbm(x, y, objective = "binary", verbose = .LGB_VERBOSITY, nrounds = 5L, num_threads = .LGB_MAX_THREADS)
}, regexp = "Factors with >2 levels as labels only allowed for multi-class objectives") }, regexp = "Factors with >2 levels as labels only allowed for multi-class objectives")
}) })
...@@ -3742,7 +3730,7 @@ test_that("lightgbm() doesn't accept multi-class classification with binary fact ...@@ -3742,7 +3730,7 @@ test_that("lightgbm() doesn't accept multi-class classification with binary fact
y <- factor(y) y <- factor(y)
x <- as.matrix(iris[, -5L]) x <- as.matrix(iris[, -5L])
expect_error({ expect_error({
lightgbm(x, y, objective = "multiclass", verbose = VERBOSITY, nrounds = 5L, num_threads = .LGB_MAX_THREADS) lightgbm(x, y, objective = "multiclass", verbose = .LGB_VERBOSITY, nrounds = 5L, num_threads = .LGB_MAX_THREADS)
}, regexp = "Two-level factors as labels only allowed for objective='binary'") }, regexp = "Two-level factors as labels only allowed for objective='binary'")
}) })
...@@ -3750,7 +3738,7 @@ test_that("lightgbm() model predictions retain factor levels for multiclass clas ...@@ -3750,7 +3738,7 @@ test_that("lightgbm() model predictions retain factor levels for multiclass clas
data("iris") data("iris")
y <- factor(iris$Species) y <- factor(iris$Species)
x <- as.matrix(iris[, -5L]) x <- as.matrix(iris[, -5L])
model <- lightgbm(x, y, objective = "auto", verbose = VERBOSITY, nrounds = 5L, num_threads = .LGB_MAX_THREADS) model <- lightgbm(x, y, objective = "auto", verbose = .LGB_VERBOSITY, nrounds = 5L, num_threads = .LGB_MAX_THREADS)
pred <- predict(model, x, type = "class") pred <- predict(model, x, type = "class")
expect_true(is.factor(pred)) expect_true(is.factor(pred))
...@@ -3769,7 +3757,7 @@ test_that("lightgbm() model predictions retain factor levels for binary classifi ...@@ -3769,7 +3757,7 @@ test_that("lightgbm() model predictions retain factor levels for binary classifi
y[y == "setosa"] <- "versicolor" y[y == "setosa"] <- "versicolor"
y <- factor(y) y <- factor(y)
x <- as.matrix(iris[, -5L]) x <- as.matrix(iris[, -5L])
model <- lightgbm(x, y, objective = "auto", verbose = VERBOSITY, nrounds = 5L, num_threads = .LGB_MAX_THREADS) model <- lightgbm(x, y, objective = "auto", verbose = .LGB_VERBOSITY, nrounds = 5L, num_threads = .LGB_MAX_THREADS)
pred <- predict(model, x, type = "class") pred <- predict(model, x, type = "class")
expect_true(is.factor(pred)) expect_true(is.factor(pred))
......
VERBOSITY <- as.integer(
Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1")
)
data(agaricus.train, package = "lightgbm") data(agaricus.train, package = "lightgbm")
data(agaricus.test, package = "lightgbm") data(agaricus.test, package = "lightgbm")
dtrain <- lgb.Dataset(agaricus.train$data, label = agaricus.train$label) dtrain <- lgb.Dataset(agaricus.train$data, label = agaricus.train$label)
dtest <- lgb.Dataset(agaricus.test$data, label = agaricus.test$label) dtest <- lgb.Dataset(agaricus.test$data, label = agaricus.test$label)
watchlist <- list(eval = dtest, train = dtrain) watchlist <- list(eval = dtest, train = dtrain)
TOLERANCE <- 1e-6
logregobj <- function(preds, dtrain) { logregobj <- function(preds, dtrain) {
labels <- get_field(dtrain, "label") labels <- get_field(dtrain, "label")
preds <- 1.0 / (1.0 + exp(-preds)) preds <- 1.0 / (1.0 + exp(-preds))
...@@ -38,7 +32,7 @@ param <- list( ...@@ -38,7 +32,7 @@ param <- list(
, learning_rate = 1.0 , learning_rate = 1.0
, objective = logregobj , objective = logregobj
, metric = "auc" , metric = "auc"
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
num_round <- 10L num_round <- 10L
...@@ -54,7 +48,7 @@ test_that("using a custom objective, custom eval, and no other metrics works", { ...@@ -54,7 +48,7 @@ test_that("using a custom objective, custom eval, and no other metrics works", {
params = list( params = list(
num_leaves = 8L num_leaves = 8L
, learning_rate = 1.0 , learning_rate = 1.0
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, data = dtrain , data = dtrain
...@@ -65,11 +59,11 @@ test_that("using a custom objective, custom eval, and no other metrics works", { ...@@ -65,11 +59,11 @@ test_that("using a custom objective, custom eval, and no other metrics works", {
) )
expect_false(is.null(bst$record_evals)) expect_false(is.null(bst$record_evals))
expect_equal(bst$best_iter, 4L) expect_equal(bst$best_iter, 4L)
expect_true(abs(bst$best_score - 0.000621) < TOLERANCE) expect_true(abs(bst$best_score - 0.000621) < .LGB_NUMERIC_TOLERANCE)
eval_results <- bst$eval_valid(feval = evalerror)[[1L]] eval_results <- bst$eval_valid(feval = evalerror)[[1L]]
expect_true(eval_results[["data_name"]] == "eval") expect_true(eval_results[["data_name"]] == "eval")
expect_true(abs(eval_results[["value"]] - 0.0006207325) < TOLERANCE) expect_true(abs(eval_results[["value"]] - 0.0006207325) < .LGB_NUMERIC_TOLERANCE)
expect_true(eval_results[["name"]] == "error") expect_true(eval_results[["name"]] == "error")
expect_false(eval_results[["higher_better"]]) expect_false(eval_results[["higher_better"]])
}) })
...@@ -81,7 +75,7 @@ test_that("using a custom objective that returns wrong shape grad or hess raises ...@@ -81,7 +75,7 @@ test_that("using a custom objective that returns wrong shape grad or hess raises
bad_hess <- function(preds, dtrain) { bad_hess <- function(preds, dtrain) {
return(list(grad = rep(1.0, length(preds)), hess = numeric(0L))) return(list(grad = rep(1.0, length(preds)), hess = numeric(0L)))
} }
params <- list(num_leaves = 3L, verbose = VERBOSITY) params <- list(num_leaves = 3L, verbose = .LGB_VERBOSITY)
expect_error({ expect_error({
lgb.train(params = params, data = dtrain, obj = bad_grad) lgb.train(params = params, data = dtrain, obj = bad_grad)
}, sprintf("Expected custom objective function to return grad with length %d, got 0.", nrow(dtrain))) }, sprintf("Expected custom objective function to return grad with length %d, got 0.", nrow(dtrain)))
......
VERBOSITY <- as.integer(
Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1")
)
data(agaricus.train, package = "lightgbm") data(agaricus.train, package = "lightgbm")
train_data <- agaricus.train$data[seq_len(1000L), ] train_data <- agaricus.train$data[seq_len(1000L), ]
train_label <- agaricus.train$label[seq_len(1000L)] train_label <- agaricus.train$label[seq_len(1000L)]
...@@ -16,7 +12,7 @@ test_that("lgb.Dataset: basic construction, saving, loading", { ...@@ -16,7 +12,7 @@ test_that("lgb.Dataset: basic construction, saving, loading", {
test_data test_data
, label = test_label , label = test_label
, params = list( , params = list(
verbose = VERBOSITY verbose = .LGB_VERBOSITY
) )
) )
# from dense matrix # from dense matrix
...@@ -30,7 +26,7 @@ test_that("lgb.Dataset: basic construction, saving, loading", { ...@@ -30,7 +26,7 @@ test_that("lgb.Dataset: basic construction, saving, loading", {
dtest3 <- lgb.Dataset( dtest3 <- lgb.Dataset(
tmp_file tmp_file
, params = list( , params = list(
verbose = VERBOSITY verbose = .LGB_VERBOSITY
) )
) )
lgb.Dataset.construct(dtest3) lgb.Dataset.construct(dtest3)
...@@ -376,7 +372,7 @@ test_that("lgb.Dataset: should be able to run lgb.train() immediately after usin ...@@ -376,7 +372,7 @@ test_that("lgb.Dataset: should be able to run lgb.train() immediately after usin
data = test_data data = test_data
, label = test_label , label = test_label
, params = list( , params = list(
verbose = VERBOSITY verbose = .LGB_VERBOSITY
) )
) )
tmp_file <- tempfile(pattern = "lgb.Dataset_") tmp_file <- tempfile(pattern = "lgb.Dataset_")
...@@ -393,7 +389,7 @@ test_that("lgb.Dataset: should be able to run lgb.train() immediately after usin ...@@ -393,7 +389,7 @@ test_that("lgb.Dataset: should be able to run lgb.train() immediately after usin
, metric = "binary_logloss" , metric = "binary_logloss"
, num_leaves = 5L , num_leaves = 5L
, learning_rate = 1.0 , learning_rate = 1.0
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
...@@ -411,7 +407,7 @@ test_that("lgb.Dataset: should be able to run lgb.cv() immediately after using l ...@@ -411,7 +407,7 @@ test_that("lgb.Dataset: should be able to run lgb.cv() immediately after using l
data = test_data data = test_data
, label = test_label , label = test_label
, params = list( , params = list(
verbosity = VERBOSITY verbosity = .LGB_VERBOSITY
) )
) )
tmp_file <- tempfile(pattern = "lgb.Dataset_") tmp_file <- tempfile(pattern = "lgb.Dataset_")
...@@ -429,7 +425,7 @@ test_that("lgb.Dataset: should be able to run lgb.cv() immediately after using l ...@@ -429,7 +425,7 @@ test_that("lgb.Dataset: should be able to run lgb.cv() immediately after using l
, num_leaves = 5L , num_leaves = 5L
, learning_rate = 1.0 , learning_rate = 1.0
, num_iterations = 5L , num_iterations = 5L
, verbosity = VERBOSITY , verbosity = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
...@@ -475,7 +471,7 @@ test_that("lgb.Dataset: should be able to create a Dataset from a text file with ...@@ -475,7 +471,7 @@ test_that("lgb.Dataset: should be able to create a Dataset from a text file with
data = train_file data = train_file
, params = list( , params = list(
header = TRUE header = TRUE
, verbosity = VERBOSITY , verbosity = .LGB_VERBOSITY
) )
) )
dtrain$construct() dtrain$construct()
...@@ -499,7 +495,7 @@ test_that("lgb.Dataset: should be able to create a Dataset from a text file with ...@@ -499,7 +495,7 @@ test_that("lgb.Dataset: should be able to create a Dataset from a text file with
data = train_file data = train_file
, params = list( , params = list(
header = FALSE header = FALSE
, verbosity = VERBOSITY , verbosity = .LGB_VERBOSITY
) )
) )
dtrain$construct() dtrain$construct()
......
VERBOSITY <- as.integer(
Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1")
)
# numerical tolerance to use when checking metric values
TOLERANCE <- 1e-06
ON_32_BIT_WINDOWS <- .Platform$OS.type == "windows" && .Machine$sizeof.pointer != 8L
test_that("learning-to-rank with lgb.train() works as expected", { test_that("learning-to-rank with lgb.train() works as expected", {
set.seed(708L) set.seed(708L)
data(agaricus.train, package = "lightgbm") data(agaricus.train, package = "lightgbm")
...@@ -26,7 +17,7 @@ test_that("learning-to-rank with lgb.train() works as expected", { ...@@ -26,7 +17,7 @@ test_that("learning-to-rank with lgb.train() works as expected", {
, ndcg_at = ndcg_at , ndcg_at = ndcg_at
, lambdarank_truncation_level = 3L , lambdarank_truncation_level = 3L
, learning_rate = 0.001 , learning_rate = 0.001
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
model <- lgb.train( model <- lgb.train(
...@@ -60,15 +51,15 @@ test_that("learning-to-rank with lgb.train() works as expected", { ...@@ -60,15 +51,15 @@ test_that("learning-to-rank with lgb.train() works as expected", {
, eval_names , eval_names
) )
expect_equal(eval_results[[1L]][["value"]], 0.775) expect_equal(eval_results[[1L]][["value"]], 0.775)
if (!ON_32_BIT_WINDOWS) { if (!.LGB_ON_32_BIT_WINDOWS) {
expect_true(abs(eval_results[[2L]][["value"]] - 0.745986) < TOLERANCE) expect_true(abs(eval_results[[2L]][["value"]] - 0.745986) < .LGB_NUMERIC_TOLERANCE)
expect_true(abs(eval_results[[3L]][["value"]] - 0.7351959) < TOLERANCE) expect_true(abs(eval_results[[3L]][["value"]] - 0.7351959) < .LGB_NUMERIC_TOLERANCE)
} }
}) })
test_that("learning-to-rank with lgb.cv() works as expected", { test_that("learning-to-rank with lgb.cv() works as expected", {
testthat::skip_if( testthat::skip_if(
ON_32_BIT_WINDOWS .LGB_ON_32_BIT_WINDOWS
, message = "Skipping on 32-bit Windows" , message = "Skipping on 32-bit Windows"
) )
set.seed(708L) set.seed(708L)
...@@ -91,7 +82,7 @@ test_that("learning-to-rank with lgb.cv() works as expected", { ...@@ -91,7 +82,7 @@ test_that("learning-to-rank with lgb.cv() works as expected", {
, label_gain = "0,1,3" , label_gain = "0,1,3"
, min_data = 1L , min_data = 1L
, learning_rate = 0.01 , learning_rate = 0.01
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
nfold <- 4L nfold <- 4L
...@@ -115,7 +106,7 @@ test_that("learning-to-rank with lgb.cv() works as expected", { ...@@ -115,7 +106,7 @@ test_that("learning-to-rank with lgb.cv() works as expected", {
best_score <- cv_bst$best_score best_score <- cv_bst$best_score
expect_true(best_iter > 0L && best_iter <= nrounds) expect_true(best_iter > 0L && best_iter <= nrounds)
expect_true(best_score > 0.0 && best_score < 1.0) expect_true(best_score > 0.0 && best_score < 1.0)
expect_true(abs(best_score - 0.75) < TOLERANCE) expect_true(abs(best_score - 0.75) < .LGB_NUMERIC_TOLERANCE)
# best_score should be set for the first metric # best_score should be set for the first metric
first_metric <- eval_names[[1L]] first_metric <- eval_names[[1L]]
...@@ -138,19 +129,19 @@ test_that("learning-to-rank with lgb.cv() works as expected", { ...@@ -138,19 +129,19 @@ test_that("learning-to-rank with lgb.cv() works as expected", {
# first and last value of each metric should be as expected # first and last value of each metric should be as expected
ndcg1_values <- c(0.675, 0.725, 0.65, 0.725, 0.75, 0.725, 0.75, 0.725, 0.75, 0.75) ndcg1_values <- c(0.675, 0.725, 0.65, 0.725, 0.75, 0.725, 0.75, 0.725, 0.75, 0.75)
expect_true(all(abs(unlist(eval_results[["ndcg@1"]][["eval"]]) - ndcg1_values) < TOLERANCE)) expect_true(all(abs(unlist(eval_results[["ndcg@1"]][["eval"]]) - ndcg1_values) < .LGB_NUMERIC_TOLERANCE))
ndcg2_values <- c( ndcg2_values <- c(
0.6556574, 0.6669721, 0.6306574, 0.6476294, 0.6629581, 0.6556574, 0.6669721, 0.6306574, 0.6476294, 0.6629581,
0.6476294, 0.6629581, 0.6379581, 0.7113147, 0.6823008 0.6476294, 0.6629581, 0.6379581, 0.7113147, 0.6823008
) )
expect_true(all(abs(unlist(eval_results[["ndcg@2"]][["eval"]]) - ndcg2_values) < TOLERANCE)) expect_true(all(abs(unlist(eval_results[["ndcg@2"]][["eval"]]) - ndcg2_values) < .LGB_NUMERIC_TOLERANCE))
ndcg3_values <- c( ndcg3_values <- c(
0.6484639, 0.6571238, 0.6469279, 0.6540516, 0.6481857, 0.6484639, 0.6571238, 0.6469279, 0.6540516, 0.6481857,
0.6481857, 0.6481857, 0.6466496, 0.7027939, 0.6629898 0.6481857, 0.6481857, 0.6466496, 0.7027939, 0.6629898
) )
expect_true(all(abs(unlist(eval_results[["ndcg@3"]][["eval"]]) - ndcg3_values) < TOLERANCE)) expect_true(all(abs(unlist(eval_results[["ndcg@3"]][["eval"]]) - ndcg3_values) < .LGB_NUMERIC_TOLERANCE))
# check details of each booster # check details of each booster
for (bst in cv_bst$boosters) { for (bst in cv_bst$boosters) {
......
VERBOSITY <- as.integer(
Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1")
)
ON_WINDOWS <- .Platform$OS.type == "windows"
TOLERANCE <- 1e-6
test_that("Booster$finalize() should not fail", { test_that("Booster$finalize() should not fail", {
X <- as.matrix(as.integer(iris[, "Species"]), ncol = 1L) X <- as.matrix(as.integer(iris[, "Species"]), ncol = 1L)
y <- iris[["Sepal.Length"]] y <- iris[["Sepal.Length"]]
...@@ -15,7 +8,7 @@ test_that("Booster$finalize() should not fail", { ...@@ -15,7 +8,7 @@ test_that("Booster$finalize() should not fail", {
objective = "regression" objective = "regression"
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, nrounds = 3L , nrounds = 3L
) )
expect_true(lgb.is.Booster(bst)) expect_true(lgb.is.Booster(bst))
...@@ -66,7 +59,7 @@ test_that("lgb.get.eval.result() should throw an informative error for incorrect ...@@ -66,7 +59,7 @@ test_that("lgb.get.eval.result() should throw an informative error for incorrect
, metric = "l2" , metric = "l2"
, min_data = 1L , min_data = 1L
, learning_rate = 1.0 , learning_rate = 1.0
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, data = dtrain , data = dtrain
...@@ -101,7 +94,7 @@ test_that("lgb.get.eval.result() should throw an informative error for incorrect ...@@ -101,7 +94,7 @@ test_that("lgb.get.eval.result() should throw an informative error for incorrect
, metric = "l2" , metric = "l2"
, min_data = 1L , min_data = 1L
, learning_rate = 1.0 , learning_rate = 1.0
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
) )
, data = dtrain , data = dtrain
, nrounds = 5L , nrounds = 5L
...@@ -135,7 +128,7 @@ test_that("lgb.load() gives the expected error messages given different incorrec ...@@ -135,7 +128,7 @@ test_that("lgb.load() gives the expected error messages given different incorrec
objective = "binary" objective = "binary"
, num_leaves = 4L , num_leaves = 4L
, learning_rate = 1.0 , learning_rate = 1.0
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
) )
, nrounds = 2L , nrounds = 2L
) )
...@@ -186,7 +179,7 @@ test_that("Loading a Booster from a text file works", { ...@@ -186,7 +179,7 @@ test_that("Loading a Booster from a text file works", {
, metric = c("mape", "average_precision") , metric = c("mape", "average_precision")
, learning_rate = 1.0 , learning_rate = 1.0
, objective = "binary" , objective = "binary"
, verbosity = VERBOSITY , verbosity = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
bst <- lightgbm( bst <- lightgbm(
...@@ -237,7 +230,7 @@ test_that("boosters with linear models at leaves can be written to text file and ...@@ -237,7 +230,7 @@ test_that("boosters with linear models at leaves can be written to text file and
data = dtrain data = dtrain
, nrounds = 10L , nrounds = 10L
, params = params , params = params
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
) )
expect_true(lgb.is.Booster(bst)) expect_true(lgb.is.Booster(bst))
...@@ -271,7 +264,7 @@ test_that("Loading a Booster from a string works", { ...@@ -271,7 +264,7 @@ test_that("Loading a Booster from a string works", {
num_leaves = 4L num_leaves = 4L
, learning_rate = 1.0 , learning_rate = 1.0
, objective = "binary" , objective = "binary"
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, nrounds = 2L , nrounds = 2L
...@@ -307,7 +300,7 @@ test_that("Saving a large model to string should work", { ...@@ -307,7 +300,7 @@ test_that("Saving a large model to string should work", {
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, nrounds = 500L , nrounds = 500L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
) )
pred <- predict(bst, train$data) pred <- predict(bst, train$data)
...@@ -351,7 +344,7 @@ test_that("Saving a large model to JSON should work", { ...@@ -351,7 +344,7 @@ test_that("Saving a large model to JSON should work", {
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, nrounds = 200L , nrounds = 200L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
) )
model_json <- bst$dump_model() model_json <- bst$dump_model()
...@@ -378,7 +371,7 @@ test_that("If a string and a file are both passed to lgb.load() the file is used ...@@ -378,7 +371,7 @@ test_that("If a string and a file are both passed to lgb.load() the file is used
num_leaves = 4L num_leaves = 4L
, learning_rate = 1.0 , learning_rate = 1.0
, objective = "binary" , objective = "binary"
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, nrounds = 2L , nrounds = 2L
...@@ -413,7 +406,7 @@ test_that("Creating a Booster from a Dataset should work", { ...@@ -413,7 +406,7 @@ test_that("Creating a Booster from a Dataset should work", {
bst <- Booster$new( bst <- Booster$new(
params = list( params = list(
objective = "binary" objective = "binary"
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
), ),
train_set = dtrain train_set = dtrain
...@@ -435,7 +428,7 @@ test_that("Creating a Booster from a Dataset with an existing predictor should w ...@@ -435,7 +428,7 @@ test_that("Creating a Booster from a Dataset with an existing predictor should w
num_leaves = 4L num_leaves = 4L
, learning_rate = 1.0 , learning_rate = 1.0
, objective = "binary" , objective = "binary"
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, nrounds = nrounds , nrounds = nrounds
...@@ -449,7 +442,7 @@ test_that("Creating a Booster from a Dataset with an existing predictor should w ...@@ -449,7 +442,7 @@ test_that("Creating a Booster from a Dataset with an existing predictor should w
bst_from_ds <- Booster$new( bst_from_ds <- Booster$new(
train_set = dtest train_set = dtest
, params = list( , params = list(
verbose = VERBOSITY verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
) )
...@@ -473,7 +466,7 @@ test_that("Booster$eval() should work on a Dataset stored in a binary file", { ...@@ -473,7 +466,7 @@ test_that("Booster$eval() should work on a Dataset stored in a binary file", {
objective = "regression" objective = "regression"
, metric = "l2" , metric = "l2"
, num_leaves = 4L , num_leaves = 4L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, data = dtrain , data = dtrain
...@@ -504,14 +497,14 @@ test_that("Booster$eval() should work on a Dataset stored in a binary file", { ...@@ -504,14 +497,14 @@ test_that("Booster$eval() should work on a Dataset stored in a binary file", {
eval_from_file <- bst$eval( eval_from_file <- bst$eval(
data = lgb.Dataset( data = lgb.Dataset(
data = test_file data = test_file
, params = list(verbose = VERBOSITY, num_threads = .LGB_MAX_THREADS) , params = list(verbose = .LGB_VERBOSITY, num_threads = .LGB_MAX_THREADS)
)$construct() )$construct()
, name = "test" , name = "test"
) )
expect_true(abs(eval_in_mem[[1L]][["value"]] - 0.1744423) < TOLERANCE) expect_true(abs(eval_in_mem[[1L]][["value"]] - 0.1744423) < .LGB_NUMERIC_TOLERANCE)
# refer to https://github.com/microsoft/LightGBM/issues/4680 # refer to https://github.com/microsoft/LightGBM/issues/4680
if (isTRUE(ON_WINDOWS)) { if (isTRUE(.LGB_ON_WINDOWS)) {
expect_equal(eval_in_mem, eval_from_file) expect_equal(eval_in_mem, eval_from_file)
} else { } else {
expect_identical(eval_in_mem, eval_from_file) expect_identical(eval_in_mem, eval_from_file)
...@@ -532,7 +525,7 @@ test_that("Booster$rollback_one_iter() should work as expected", { ...@@ -532,7 +525,7 @@ test_that("Booster$rollback_one_iter() should work as expected", {
num_leaves = 4L num_leaves = 4L
, learning_rate = 1.0 , learning_rate = 1.0
, objective = "binary" , objective = "binary"
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, nrounds = nrounds , nrounds = nrounds
...@@ -567,7 +560,7 @@ test_that("Booster$update() passing a train_set works as expected", { ...@@ -567,7 +560,7 @@ test_that("Booster$update() passing a train_set works as expected", {
num_leaves = 4L num_leaves = 4L
, learning_rate = 1.0 , learning_rate = 1.0
, objective = "binary" , objective = "binary"
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, nrounds = nrounds , nrounds = nrounds
...@@ -578,7 +571,7 @@ test_that("Booster$update() passing a train_set works as expected", { ...@@ -578,7 +571,7 @@ test_that("Booster$update() passing a train_set works as expected", {
train_set = Dataset$new( train_set = Dataset$new(
data = agaricus.train$data data = agaricus.train$data
, label = agaricus.train$label , label = agaricus.train$label
, params = list(verbose = VERBOSITY) , params = list(verbose = .LGB_VERBOSITY)
) )
) )
expect_true(lgb.is.Booster(bst)) expect_true(lgb.is.Booster(bst))
...@@ -592,7 +585,7 @@ test_that("Booster$update() passing a train_set works as expected", { ...@@ -592,7 +585,7 @@ test_that("Booster$update() passing a train_set works as expected", {
num_leaves = 4L num_leaves = 4L
, learning_rate = 1.0 , learning_rate = 1.0
, objective = "binary" , objective = "binary"
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, nrounds = nrounds + 1L , nrounds = nrounds + 1L
...@@ -618,7 +611,7 @@ test_that("Booster$update() throws an informative error if you provide a non-Dat ...@@ -618,7 +611,7 @@ test_that("Booster$update() throws an informative error if you provide a non-Dat
num_leaves = 4L num_leaves = 4L
, learning_rate = 1.0 , learning_rate = 1.0
, objective = "binary" , objective = "binary"
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, nrounds = nrounds , nrounds = nrounds
...@@ -646,7 +639,7 @@ test_that("Booster should store parameters and Booster$reset_parameter() should ...@@ -646,7 +639,7 @@ test_that("Booster should store parameters and Booster$reset_parameter() should
, metric = c("multi_logloss", "multi_error") , metric = c("multi_logloss", "multi_error")
, boosting = "gbdt" , boosting = "gbdt"
, num_class = 5L , num_class = 5L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
bst <- Booster$new( bst <- Booster$new(
...@@ -674,7 +667,7 @@ test_that("Booster$params should include dataset params, before and after Booste ...@@ -674,7 +667,7 @@ test_that("Booster$params should include dataset params, before and after Booste
objective = "binary" objective = "binary"
, max_depth = 4L , max_depth = 4L
, bagging_fraction = 0.8 , bagging_fraction = 0.8
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
bst <- Booster$new( bst <- Booster$new(
...@@ -687,7 +680,7 @@ test_that("Booster$params should include dataset params, before and after Booste ...@@ -687,7 +680,7 @@ test_that("Booster$params should include dataset params, before and after Booste
objective = "binary" objective = "binary"
, max_depth = 4L , max_depth = 4L
, bagging_fraction = 0.8 , bagging_fraction = 0.8
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
, max_bin = 17L , max_bin = 17L
) )
...@@ -699,7 +692,7 @@ test_that("Booster$params should include dataset params, before and after Booste ...@@ -699,7 +692,7 @@ test_that("Booster$params should include dataset params, before and after Booste
objective = "binary" objective = "binary"
, max_depth = 4L , max_depth = 4L
, bagging_fraction = 0.9 , bagging_fraction = 0.9
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
, max_bin = 17L , max_bin = 17L
) )
...@@ -718,7 +711,7 @@ test_that("Saving a model with different feature importance types works", { ...@@ -718,7 +711,7 @@ test_that("Saving a model with different feature importance types works", {
num_leaves = 4L num_leaves = 4L
, learning_rate = 1.0 , learning_rate = 1.0
, objective = "binary" , objective = "binary"
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, nrounds = 2L , nrounds = 2L
...@@ -774,7 +767,7 @@ test_that("Saving a model with unknown importance type fails", { ...@@ -774,7 +767,7 @@ test_that("Saving a model with unknown importance type fails", {
num_leaves = 4L num_leaves = 4L
, learning_rate = 1.0 , learning_rate = 1.0
, objective = "binary" , objective = "binary"
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, nrounds = 2L , nrounds = 2L
...@@ -815,7 +808,7 @@ test_that("all parameters are stored correctly with save_model_to_string()", { ...@@ -815,7 +808,7 @@ test_that("all parameters are stored correctly with save_model_to_string()", {
) )
, data = dtrain , data = dtrain
, nrounds = nrounds , nrounds = nrounds
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
) )
model_str <- bst$save_model_to_string() model_str <- bst$save_model_to_string()
...@@ -832,7 +825,7 @@ test_that("all parameters are stored correctly with save_model_to_string()", { ...@@ -832,7 +825,7 @@ test_that("all parameters are stored correctly with save_model_to_string()", {
expect_equal(sum(params_in_file == "[objective: regression]"), 1L) expect_equal(sum(params_in_file == "[objective: regression]"), 1L)
expect_equal(sum(startsWith(params_in_file, "[verbosity:")), 1L) expect_equal(sum(startsWith(params_in_file, "[verbosity:")), 1L)
expect_equal(sum(params_in_file == sprintf("[verbosity: %i]", VERBOSITY)), 1L) expect_equal(sum(params_in_file == sprintf("[verbosity: %i]", .LGB_VERBOSITY)), 1L)
# early stopping should be off by default # early stopping should be off by default
expect_equal(sum(startsWith(params_in_file, "[early_stopping_round:")), 1L) expect_equal(sum(startsWith(params_in_file, "[early_stopping_round:")), 1L)
...@@ -879,7 +872,7 @@ test_that("early_stopping, num_iterations are stored correctly in model string e ...@@ -879,7 +872,7 @@ test_that("early_stopping, num_iterations are stored correctly in model string e
, valids = list( , valids = list(
"random_valid" = dvalid "random_valid" = dvalid
) )
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
) )
model_str <- bst$save_model_to_string() model_str <- bst$save_model_to_string()
...@@ -911,7 +904,7 @@ test_that("Booster: method calls Booster with a null handle should raise an info ...@@ -911,7 +904,7 @@ test_that("Booster: method calls Booster with a null handle should raise an info
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
, data = dtrain , data = dtrain
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, nrounds = 5L , nrounds = 5L
, valids = list( , valids = list(
train = dtrain train = dtrain
...@@ -987,7 +980,7 @@ test_that("Booster$new() using a Dataset with a null handle should raise an info ...@@ -987,7 +980,7 @@ test_that("Booster$new() using a Dataset with a null handle should raise an info
bst <- Booster$new( bst <- Booster$new(
train_set = dtrain train_set = dtrain
, params = list( , params = list(
verbose = VERBOSITY verbose = .LGB_VERBOSITY
) )
) )
}, regexp = "Attempting to create a Dataset without any raw data") }, regexp = "Attempting to create a Dataset without any raw data")
...@@ -1098,7 +1091,7 @@ test_that("lgb.cv() correctly handles passing through params to the model file", ...@@ -1098,7 +1091,7 @@ test_that("lgb.cv() correctly handles passing through params to the model file",
, n_iter = n_iter , n_iter = n_iter
, early_stopping_round = early_stopping_round , early_stopping_round = early_stopping_round
, n_iter_no_change = n_iter_no_change , n_iter_no_change = n_iter_no_change
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
...@@ -1108,7 +1101,7 @@ test_that("lgb.cv() correctly handles passing through params to the model file", ...@@ -1108,7 +1101,7 @@ test_that("lgb.cv() correctly handles passing through params to the model file",
, nrounds = nrounds_kwarg , nrounds = nrounds_kwarg
, early_stopping_rounds = early_stopping_round_kwarg , early_stopping_rounds = early_stopping_round_kwarg
, nfold = 3L , nfold = 3L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
) )
for (bst in cv_bst$boosters) { for (bst in cv_bst$boosters) {
...@@ -1143,7 +1136,7 @@ test_that("params (including dataset params) should be stored in .rds file for B ...@@ -1143,7 +1136,7 @@ test_that("params (including dataset params) should be stored in .rds file for B
objective = "binary" objective = "binary"
, max_depth = 4L , max_depth = 4L
, bagging_fraction = 0.8 , bagging_fraction = 0.8
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
bst <- Booster$new( bst <- Booster$new(
...@@ -1160,7 +1153,7 @@ test_that("params (including dataset params) should be stored in .rds file for B ...@@ -1160,7 +1153,7 @@ test_that("params (including dataset params) should be stored in .rds file for B
objective = "binary" objective = "binary"
, max_depth = 4L , max_depth = 4L
, bagging_fraction = 0.8 , bagging_fraction = 0.8
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
, max_bin = 17L , max_bin = 17L
) )
...@@ -1180,7 +1173,7 @@ test_that("params (including dataset params) should be stored in .rds file for B ...@@ -1180,7 +1173,7 @@ test_that("params (including dataset params) should be stored in .rds file for B
objective = "binary" objective = "binary"
, max_depth = 4L , max_depth = 4L
, bagging_fraction = 0.8 , bagging_fraction = 0.8
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
bst <- Booster$new( bst <- Booster$new(
...@@ -1197,7 +1190,7 @@ test_that("params (including dataset params) should be stored in .rds file for B ...@@ -1197,7 +1190,7 @@ test_that("params (including dataset params) should be stored in .rds file for B
objective = "binary" objective = "binary"
, max_depth = 4L , max_depth = 4L
, bagging_fraction = 0.8 , bagging_fraction = 0.8
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
, max_bin = 17L , max_bin = 17L
) )
...@@ -1212,7 +1205,7 @@ test_that("Handle is automatically restored when calling predict", { ...@@ -1212,7 +1205,7 @@ test_that("Handle is automatically restored when calling predict", {
, nrounds = 5L , nrounds = 5L
, obj = "binary" , obj = "binary"
, params = list( , params = list(
verbose = VERBOSITY verbose = .LGB_VERBOSITY
) )
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
...@@ -1236,7 +1229,7 @@ test_that("boosters with linear models at leaves work with saveRDS.lgb.Booster a ...@@ -1236,7 +1229,7 @@ test_that("boosters with linear models at leaves work with saveRDS.lgb.Booster a
params <- list( params <- list(
objective = "regression" objective = "regression"
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, metric = "mse" , metric = "mse"
, seed = 0L , seed = 0L
, num_leaves = 2L , num_leaves = 2L
...@@ -1276,7 +1269,7 @@ test_that("boosters with linear models at leaves can be written to RDS and re-lo ...@@ -1276,7 +1269,7 @@ test_that("boosters with linear models at leaves can be written to RDS and re-lo
params <- list( params <- list(
objective = "regression" objective = "regression"
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, metric = "mse" , metric = "mse"
, seed = 0L , seed = 0L
, num_leaves = 2L , num_leaves = 2L
...@@ -1386,7 +1379,7 @@ test_that("Booster's print, show, and summary work correctly", { ...@@ -1386,7 +1379,7 @@ test_that("Booster's print, show, and summary work correctly", {
min_data_in_bin = 1L min_data_in_bin = 1L
) )
) )
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, nrounds = 5L , nrounds = 5L
) )
.check_methods_work(model) .check_methods_work(model)
...@@ -1398,7 +1391,7 @@ test_that("Booster's print, show, and summary work correctly", { ...@@ -1398,7 +1391,7 @@ test_that("Booster's print, show, and summary work correctly", {
as.matrix(iris[, -5L]) as.matrix(iris[, -5L])
, label = as.numeric(factor(iris$Species)) - 1.0 , label = as.numeric(factor(iris$Species)) - 1.0
) )
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, nrounds = 5L , nrounds = 5L
) )
.check_methods_work(model) .check_methods_work(model)
...@@ -1431,7 +1424,7 @@ test_that("Booster's print, show, and summary work correctly", { ...@@ -1431,7 +1424,7 @@ test_that("Booster's print, show, and summary work correctly", {
) )
, obj = .logregobj , obj = .logregobj
, eval = .evalerror , eval = .evalerror
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, nrounds = 5L , nrounds = 5L
, params = list(num_threads = .LGB_MAX_THREADS) , params = list(num_threads = .LGB_MAX_THREADS)
) )
...@@ -1454,7 +1447,7 @@ test_that("LGBM_BoosterGetNumFeature_R returns correct outputs", { ...@@ -1454,7 +1447,7 @@ test_that("LGBM_BoosterGetNumFeature_R returns correct outputs", {
min_data_in_bin = 1L min_data_in_bin = 1L
) )
) )
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, nrounds = 5L , nrounds = 5L
) )
ncols <- .Call(LGBM_BoosterGetNumFeature_R, model$.__enclos_env__$private$handle) ncols <- .Call(LGBM_BoosterGetNumFeature_R, model$.__enclos_env__$private$handle)
...@@ -1467,7 +1460,7 @@ test_that("LGBM_BoosterGetNumFeature_R returns correct outputs", { ...@@ -1467,7 +1460,7 @@ test_that("LGBM_BoosterGetNumFeature_R returns correct outputs", {
as.matrix(iris[, -5L]) as.matrix(iris[, -5L])
, label = as.numeric(factor(iris$Species)) - 1.0 , label = as.numeric(factor(iris$Species)) - 1.0
) )
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, nrounds = 5L , nrounds = 5L
) )
ncols <- .Call(LGBM_BoosterGetNumFeature_R, model$.__enclos_env__$private$handle) ncols <- .Call(LGBM_BoosterGetNumFeature_R, model$.__enclos_env__$private$handle)
......
VERBOSITY <- as.integer(
Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1")
)
.sigmoid <- function(x) { .sigmoid <- function(x) {
1.0 / (1.0 + exp(-x)) 1.0 / (1.0 + exp(-x))
} }
...@@ -30,7 +26,7 @@ test_that("lgb.intereprete works as expected for binary classification", { ...@@ -30,7 +26,7 @@ test_that("lgb.intereprete works as expected for binary classification", {
, max_depth = -1L , max_depth = -1L
, min_data_in_leaf = 1L , min_data_in_leaf = 1L
, min_sum_hessian_in_leaf = 1.0 , min_sum_hessian_in_leaf = 1.0
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
model <- lgb.train( model <- lgb.train(
...@@ -83,7 +79,7 @@ test_that("lgb.intereprete works as expected for multiclass classification", { ...@@ -83,7 +79,7 @@ test_that("lgb.intereprete works as expected for multiclass classification", {
, num_class = 3L , num_class = 3L
, learning_rate = 0.00001 , learning_rate = 0.00001
, min_data = 1L , min_data = 1L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
model <- lgb.train( model <- lgb.train(
......
VERBOSITY <- as.integer(
Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1")
)
test_that("lgb.plot.importance() should run without error for well-formed inputs", { test_that("lgb.plot.importance() should run without error for well-formed inputs", {
data(agaricus.train, package = "lightgbm") data(agaricus.train, package = "lightgbm")
train <- agaricus.train train <- agaricus.train
...@@ -13,7 +9,7 @@ test_that("lgb.plot.importance() should run without error for well-formed inputs ...@@ -13,7 +9,7 @@ test_that("lgb.plot.importance() should run without error for well-formed inputs
, max_depth = -1L , max_depth = -1L
, min_data_in_leaf = 1L , min_data_in_leaf = 1L
, min_sum_hessian_in_leaf = 1.0 , min_sum_hessian_in_leaf = 1.0
, verbosity = VERBOSITY , verbosity = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
model <- lgb.train(params, dtrain, 3L) model <- lgb.train(params, dtrain, 3L)
......
VERBOSITY <- as.integer(
Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1")
)
.sigmoid <- function(x) { .sigmoid <- function(x) {
1.0 / (1.0 + exp(-x)) 1.0 / (1.0 + exp(-x))
} }
...@@ -30,7 +26,7 @@ test_that("lgb.plot.interepretation works as expected for binary classification" ...@@ -30,7 +26,7 @@ test_that("lgb.plot.interepretation works as expected for binary classification"
, max_depth = -1L , max_depth = -1L
, min_data_in_leaf = 1L , min_data_in_leaf = 1L
, min_sum_hessian_in_leaf = 1.0 , min_sum_hessian_in_leaf = 1.0
, verbosity = VERBOSITY , verbosity = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS , num_threads = .LGB_MAX_THREADS
) )
model <- lgb.train( model <- lgb.train(
...@@ -87,7 +83,7 @@ test_that("lgb.plot.interepretation works as expected for multiclass classificat ...@@ -87,7 +83,7 @@ test_that("lgb.plot.interepretation works as expected for multiclass classificat
params = params params = params
, data = dtrain , data = dtrain
, nrounds = 3L , nrounds = 3L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
) )
num_trees <- 5L num_trees <- 5L
tree_interpretation <- lgb.interprete( tree_interpretation <- lgb.interprete(
......
VERBOSITY <- as.integer(
Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1")
)
test_that("Gamma regression reacts on 'weight'", { test_that("Gamma regression reacts on 'weight'", {
n <- 100L n <- 100L
set.seed(87L) set.seed(87L)
...@@ -17,7 +13,7 @@ test_that("Gamma regression reacts on 'weight'", { ...@@ -17,7 +13,7 @@ test_that("Gamma regression reacts on 'weight'", {
params = params params = params
, data = dtrain , data = dtrain
, nrounds = 4L , nrounds = 4L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
) )
pred_unweighted <- predict(bst, X_pred) pred_unweighted <- predict(bst, X_pred)
...@@ -31,7 +27,7 @@ test_that("Gamma regression reacts on 'weight'", { ...@@ -31,7 +27,7 @@ test_that("Gamma regression reacts on 'weight'", {
params = params params = params
, data = dtrain , data = dtrain
, nrounds = 4L , nrounds = 4L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
) )
pred_weighted_1 <- predict(bst, X_pred) pred_weighted_1 <- predict(bst, X_pred)
...@@ -45,7 +41,7 @@ test_that("Gamma regression reacts on 'weight'", { ...@@ -45,7 +41,7 @@ test_that("Gamma regression reacts on 'weight'", {
params = params params = params
, data = dtrain , data = dtrain
, nrounds = 4L , nrounds = 4L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
) )
pred_weighted_2 <- predict(bst, X_pred) pred_weighted_2 <- predict(bst, X_pred)
...@@ -59,7 +55,7 @@ test_that("Gamma regression reacts on 'weight'", { ...@@ -59,7 +55,7 @@ test_that("Gamma regression reacts on 'weight'", {
params = params params = params
, data = dtrain , data = dtrain
, nrounds = 4L , nrounds = 4L
, verbose = VERBOSITY , verbose = .LGB_VERBOSITY
) )
pred_weighted <- predict(bst, X_pred) pred_weighted <- predict(bst, X_pred)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment