Unverified Commit 44928d3a authored by James Lamb's avatar James Lamb Committed by GitHub
Browse files

[R-package] consolidate testing constants in helpers file (#5992)

parent 8967debe
# ref for this file:
#
# * https://r-pkgs.org/testing-design.html#testthat-helper-files
# * https://r-pkgs.org/testing-design.html#testthat-setup-files
# LightGBM-internal fix to comply with CRAN policy of only using up to 2 threads in tests and example.
......@@ -10,3 +11,21 @@
# the check farm is a shared resource and will typically be running many checks simultaneously.
#
# upper bound on threads any single test may use; per the CRAN policy cited
# above, checks may use at most 2 threads because the check farm is shared
.LGB_MAX_THREADS <- 2L
# by default, how much should results in tests be allowed to differ from hard-coded expected numbers?
.LGB_NUMERIC_TOLERANCE <- 1e-6
# are the tests running on Windows?
.LGB_ON_WINDOWS <- .Platform$OS.type == "windows"
# 32-bit builds have non-8-byte pointers (.Machine$sizeof.pointer != 8L)
.LGB_ON_32_BIT_WINDOWS <- .LGB_ON_WINDOWS && .Machine$sizeof.pointer != 8L
# are the tests running in a UTF-8 locale?
.LGB_UTF8_LOCALE <- all(endsWith(
Sys.getlocale(category = "LC_CTYPE")
, "UTF-8"
))
# controls how loud LightGBM's logger is in tests
# (integer, read from environment variable LIGHTGBM_TEST_VERBOSITY; default "-1")
.LGB_VERBOSITY <- as.integer(
Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1")
)
# Matrix is attached for the sparse-input prediction tests below
library(Matrix)
# logger verbosity passed to LightGBM in these tests,
# read from environment variable LIGHTGBM_TEST_VERBOSITY (default "-1")
VERBOSITY <- as.integer(
Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1")
)
# numeric tolerance used when comparing predictions to expected values
TOLERANCE <- 1e-6
test_that("Predictor$finalize() should not fail", {
X <- as.matrix(as.integer(iris[, "Species"]), ncol = 1L)
y <- iris[["Sepal.Length"]]
......@@ -16,7 +10,7 @@ test_that("Predictor$finalize() should not fail", {
objective = "regression"
, num_threads = .LGB_MAX_THREADS
)
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, nrounds = 3L
)
model_file <- tempfile(fileext = ".model")
......@@ -45,7 +39,7 @@ test_that("predictions do not fail for integer input", {
objective = "regression"
, num_threads = .LGB_MAX_THREADS
)
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, nrounds = 3L
)
X_double <- X[c(1L, 51L, 101L), , drop = FALSE]
......@@ -78,7 +72,7 @@ test_that("start_iteration works correctly", {
num_leaves = 4L
, learning_rate = 0.6
, objective = "binary"
, verbosity = VERBOSITY
, verbosity = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS
)
, nrounds = 50L
......@@ -128,7 +122,7 @@ test_that("Feature contributions from sparse inputs produce sparse outputs", {
data = dtrain
, obj = "regression"
, nrounds = 5L
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, params = list(min_data_in_leaf = 5L, num_threads = .LGB_MAX_THREADS)
)
......@@ -159,7 +153,7 @@ test_that("Sparse feature contribution predictions do not take inputs with wrong
data = dtrain
, obj = "regression"
, nrounds = 5L
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, params = list(min_data_in_leaf = 5L, num_threads = .LGB_MAX_THREADS)
)
......@@ -189,7 +183,7 @@ test_that("Feature contribution predictions do not take non-general CSR or CSC i
data = dtrain
, obj = "regression"
, nrounds = 5L
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, params = list(min_data_in_leaf = 5L, num_threads = .LGB_MAX_THREADS)
)
......@@ -217,14 +211,14 @@ test_that("predict() params should override keyword argument for raw-score predi
, num_threads = .LGB_MAX_THREADS
)
, nrounds = 10L
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
)
# check that the predictions from predict.lgb.Booster() really look like raw score predictions
preds_prob <- predict(bst, X)
preds_raw_s3_keyword <- predict(bst, X, type = "raw")
preds_prob_from_raw <- 1.0 / (1.0 + exp(-preds_raw_s3_keyword))
expect_equal(preds_prob, preds_prob_from_raw, tolerance = TOLERANCE)
expect_equal(preds_prob, preds_prob_from_raw, tolerance = .LGB_NUMERIC_TOLERANCE)
accuracy <- sum(as.integer(preds_prob_from_raw > 0.5) == y) / length(y)
expect_equal(accuracy, 1.0)
......@@ -269,7 +263,7 @@ test_that("predict() params should override keyword argument for leaf-index pred
, num_threads = .LGB_MAX_THREADS
)
, nrounds = 10L
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
)
# check that predictions really look like leaf index predictions
......@@ -323,7 +317,7 @@ test_that("predict() params should override keyword argument for feature contrib
, num_threads = .LGB_MAX_THREADS
)
, nrounds = 10L
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
)
# check that predictions really look like feature contributions
......@@ -431,7 +425,7 @@ test_that("predict() keeps row names from data (regression)", {
data = dtrain
, obj = "regression"
, nrounds = 5L
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, params = list(min_data_in_leaf = 1L, num_threads = .LGB_MAX_THREADS)
)
.check_all_row_name_expectations(bst, X)
......@@ -447,7 +441,7 @@ test_that("predict() keeps row names from data (binary classification)", {
data = dtrain
, obj = "binary"
, nrounds = 5L
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, params = list(num_threads = .LGB_MAX_THREADS)
)
.check_all_row_name_expectations(bst, X)
......@@ -464,7 +458,7 @@ test_that("predict() keeps row names from data (multi-class classification)", {
, obj = "multiclass"
, params = list(num_class = 3L, num_threads = .LGB_MAX_THREADS)
, nrounds = 5L
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
)
.check_all_row_name_expectations(bst, X)
})
......@@ -485,7 +479,7 @@ test_that("predictions for regression and binary classification are returned as
data = dtrain
, obj = "regression"
, nrounds = 5L
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, params = list(min_data_in_leaf = 1L, num_threads = .LGB_MAX_THREADS)
)
pred <- predict(model, X)
......@@ -503,7 +497,7 @@ test_that("predictions for regression and binary classification are returned as
data = dtrain
, obj = "binary"
, nrounds = 5L
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, params = list(num_threads = .LGB_MAX_THREADS)
)
pred <- predict(model, X)
......@@ -523,7 +517,7 @@ test_that("predictions for multiclass classification are returned as matrix", {
data = dtrain
, obj = "multiclass"
, nrounds = 5L
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, params = list(num_class = 3L, num_threads = .LGB_MAX_THREADS)
)
pred <- predict(model, X)
......@@ -668,7 +662,7 @@ test_that("predict type='class' returns predicted class for classification objec
data = dtrain
, obj = "binary"
, nrounds = 5L
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, params = list(num_threads = .LGB_MAX_THREADS)
)
pred <- predict(bst, X, type = "class")
......@@ -682,7 +676,7 @@ test_that("predict type='class' returns predicted class for classification objec
data = dtrain
, obj = "multiclass"
, nrounds = 5L
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, params = list(num_class = 3L, num_threads = .LGB_MAX_THREADS)
)
pred <- predict(model, X, type = "class")
......@@ -698,7 +692,7 @@ test_that("predict type='class' returns values in the target's range for regress
data = dtrain
, obj = "regression"
, nrounds = 5L
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, params = list(num_threads = .LGB_MAX_THREADS)
)
pred <- predict(bst, X, type = "class")
......
This diff is collapsed.
# logger verbosity passed to LightGBM in these tests,
# read from environment variable LIGHTGBM_TEST_VERBOSITY (default "-1")
VERBOSITY <- as.integer(
Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1")
)
# shared fixtures: the agaricus demo data shipped with the lightgbm package
data(agaricus.train, package = "lightgbm")
data(agaricus.test, package = "lightgbm")
dtrain <- lgb.Dataset(agaricus.train$data, label = agaricus.train$label)
dtest <- lgb.Dataset(agaricus.test$data, label = agaricus.test$label)
# evaluation sets monitored during training: the test set first, then the training set
watchlist <- list(eval = dtest, train = dtrain)
# numeric tolerance used when comparing metric values to hard-coded expected numbers
TOLERANCE <- 1e-6
logregobj <- function(preds, dtrain) {
labels <- get_field(dtrain, "label")
preds <- 1.0 / (1.0 + exp(-preds))
......@@ -38,7 +32,7 @@ param <- list(
, learning_rate = 1.0
, objective = logregobj
, metric = "auc"
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS
)
num_round <- 10L
......@@ -54,7 +48,7 @@ test_that("using a custom objective, custom eval, and no other metrics works", {
params = list(
num_leaves = 8L
, learning_rate = 1.0
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS
)
, data = dtrain
......@@ -65,11 +59,11 @@ test_that("using a custom objective, custom eval, and no other metrics works", {
)
expect_false(is.null(bst$record_evals))
expect_equal(bst$best_iter, 4L)
expect_true(abs(bst$best_score - 0.000621) < TOLERANCE)
expect_true(abs(bst$best_score - 0.000621) < .LGB_NUMERIC_TOLERANCE)
eval_results <- bst$eval_valid(feval = evalerror)[[1L]]
expect_true(eval_results[["data_name"]] == "eval")
expect_true(abs(eval_results[["value"]] - 0.0006207325) < TOLERANCE)
expect_true(abs(eval_results[["value"]] - 0.0006207325) < .LGB_NUMERIC_TOLERANCE)
expect_true(eval_results[["name"]] == "error")
expect_false(eval_results[["higher_better"]])
})
......@@ -81,7 +75,7 @@ test_that("using a custom objective that returns wrong shape grad or hess raises
bad_hess <- function(preds, dtrain) {
return(list(grad = rep(1.0, length(preds)), hess = numeric(0L)))
}
params <- list(num_leaves = 3L, verbose = VERBOSITY)
params <- list(num_leaves = 3L, verbose = .LGB_VERBOSITY)
expect_error({
lgb.train(params = params, data = dtrain, obj = bad_grad)
}, sprintf("Expected custom objective function to return grad with length %d, got 0.", nrow(dtrain)))
......
# logger verbosity passed to LightGBM in these tests,
# read from environment variable LIGHTGBM_TEST_VERBOSITY (default "-1")
VERBOSITY <- as.integer(
Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1")
)
data(agaricus.train, package = "lightgbm")
# subset to the first 1000 rows/labels of the agaricus demo data
train_data <- agaricus.train$data[seq_len(1000L), ]
train_label <- agaricus.train$label[seq_len(1000L)]
......@@ -16,7 +12,7 @@ test_that("lgb.Dataset: basic construction, saving, loading", {
test_data
, label = test_label
, params = list(
verbose = VERBOSITY
verbose = .LGB_VERBOSITY
)
)
# from dense matrix
......@@ -30,7 +26,7 @@ test_that("lgb.Dataset: basic construction, saving, loading", {
dtest3 <- lgb.Dataset(
tmp_file
, params = list(
verbose = VERBOSITY
verbose = .LGB_VERBOSITY
)
)
lgb.Dataset.construct(dtest3)
......@@ -376,7 +372,7 @@ test_that("lgb.Dataset: should be able to run lgb.train() immediately after usin
data = test_data
, label = test_label
, params = list(
verbose = VERBOSITY
verbose = .LGB_VERBOSITY
)
)
tmp_file <- tempfile(pattern = "lgb.Dataset_")
......@@ -393,7 +389,7 @@ test_that("lgb.Dataset: should be able to run lgb.train() immediately after usin
, metric = "binary_logloss"
, num_leaves = 5L
, learning_rate = 1.0
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS
)
......@@ -411,7 +407,7 @@ test_that("lgb.Dataset: should be able to run lgb.cv() immediately after using l
data = test_data
, label = test_label
, params = list(
verbosity = VERBOSITY
verbosity = .LGB_VERBOSITY
)
)
tmp_file <- tempfile(pattern = "lgb.Dataset_")
......@@ -429,7 +425,7 @@ test_that("lgb.Dataset: should be able to run lgb.cv() immediately after using l
, num_leaves = 5L
, learning_rate = 1.0
, num_iterations = 5L
, verbosity = VERBOSITY
, verbosity = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS
)
......@@ -475,7 +471,7 @@ test_that("lgb.Dataset: should be able to create a Dataset from a text file with
data = train_file
, params = list(
header = TRUE
, verbosity = VERBOSITY
, verbosity = .LGB_VERBOSITY
)
)
dtrain$construct()
......@@ -499,7 +495,7 @@ test_that("lgb.Dataset: should be able to create a Dataset from a text file with
data = train_file
, params = list(
header = FALSE
, verbosity = VERBOSITY
, verbosity = .LGB_VERBOSITY
)
)
dtrain$construct()
......
# logger verbosity passed to LightGBM in these tests,
# read from environment variable LIGHTGBM_TEST_VERBOSITY (default "-1")
VERBOSITY <- as.integer(
Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1")
)
# numerical tolerance to use when checking metric values
TOLERANCE <- 1e-06
# TRUE on 32-bit Windows builds (non-8-byte pointers); some ranking-metric
# checks below are skipped or relaxed on that platform
ON_32_BIT_WINDOWS <- .Platform$OS.type == "windows" && .Machine$sizeof.pointer != 8L
test_that("learning-to-rank with lgb.train() works as expected", {
set.seed(708L)
data(agaricus.train, package = "lightgbm")
......@@ -26,7 +17,7 @@ test_that("learning-to-rank with lgb.train() works as expected", {
, ndcg_at = ndcg_at
, lambdarank_truncation_level = 3L
, learning_rate = 0.001
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS
)
model <- lgb.train(
......@@ -60,15 +51,15 @@ test_that("learning-to-rank with lgb.train() works as expected", {
, eval_names
)
expect_equal(eval_results[[1L]][["value"]], 0.775)
if (!ON_32_BIT_WINDOWS) {
expect_true(abs(eval_results[[2L]][["value"]] - 0.745986) < TOLERANCE)
expect_true(abs(eval_results[[3L]][["value"]] - 0.7351959) < TOLERANCE)
if (!.LGB_ON_32_BIT_WINDOWS) {
expect_true(abs(eval_results[[2L]][["value"]] - 0.745986) < .LGB_NUMERIC_TOLERANCE)
expect_true(abs(eval_results[[3L]][["value"]] - 0.7351959) < .LGB_NUMERIC_TOLERANCE)
}
})
test_that("learning-to-rank with lgb.cv() works as expected", {
testthat::skip_if(
ON_32_BIT_WINDOWS
.LGB_ON_32_BIT_WINDOWS
, message = "Skipping on 32-bit Windows"
)
set.seed(708L)
......@@ -91,7 +82,7 @@ test_that("learning-to-rank with lgb.cv() works as expected", {
, label_gain = "0,1,3"
, min_data = 1L
, learning_rate = 0.01
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS
)
nfold <- 4L
......@@ -115,7 +106,7 @@ test_that("learning-to-rank with lgb.cv() works as expected", {
best_score <- cv_bst$best_score
expect_true(best_iter > 0L && best_iter <= nrounds)
expect_true(best_score > 0.0 && best_score < 1.0)
expect_true(abs(best_score - 0.75) < TOLERANCE)
expect_true(abs(best_score - 0.75) < .LGB_NUMERIC_TOLERANCE)
# best_score should be set for the first metric
first_metric <- eval_names[[1L]]
......@@ -138,19 +129,19 @@ test_that("learning-to-rank with lgb.cv() works as expected", {
# first and last value of each metric should be as expected
ndcg1_values <- c(0.675, 0.725, 0.65, 0.725, 0.75, 0.725, 0.75, 0.725, 0.75, 0.75)
expect_true(all(abs(unlist(eval_results[["ndcg@1"]][["eval"]]) - ndcg1_values) < TOLERANCE))
expect_true(all(abs(unlist(eval_results[["ndcg@1"]][["eval"]]) - ndcg1_values) < .LGB_NUMERIC_TOLERANCE))
ndcg2_values <- c(
0.6556574, 0.6669721, 0.6306574, 0.6476294, 0.6629581,
0.6476294, 0.6629581, 0.6379581, 0.7113147, 0.6823008
)
expect_true(all(abs(unlist(eval_results[["ndcg@2"]][["eval"]]) - ndcg2_values) < TOLERANCE))
expect_true(all(abs(unlist(eval_results[["ndcg@2"]][["eval"]]) - ndcg2_values) < .LGB_NUMERIC_TOLERANCE))
ndcg3_values <- c(
0.6484639, 0.6571238, 0.6469279, 0.6540516, 0.6481857,
0.6481857, 0.6481857, 0.6466496, 0.7027939, 0.6629898
)
expect_true(all(abs(unlist(eval_results[["ndcg@3"]][["eval"]]) - ndcg3_values) < TOLERANCE))
expect_true(all(abs(unlist(eval_results[["ndcg@3"]][["eval"]]) - ndcg3_values) < .LGB_NUMERIC_TOLERANCE))
# check details of each booster
for (bst in cv_bst$boosters) {
......
# logger verbosity passed to LightGBM in these tests,
# read from environment variable LIGHTGBM_TEST_VERBOSITY (default "-1")
VERBOSITY <- as.integer(
Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1")
)
# TRUE when running on Windows; used below to loosen an equality check
# (see https://github.com/microsoft/LightGBM/issues/4680)
ON_WINDOWS <- .Platform$OS.type == "windows"
# numeric tolerance used when comparing metric values to expected numbers
TOLERANCE <- 1e-6
test_that("Booster$finalize() should not fail", {
X <- as.matrix(as.integer(iris[, "Species"]), ncol = 1L)
y <- iris[["Sepal.Length"]]
......@@ -15,7 +8,7 @@ test_that("Booster$finalize() should not fail", {
objective = "regression"
, num_threads = .LGB_MAX_THREADS
)
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, nrounds = 3L
)
expect_true(lgb.is.Booster(bst))
......@@ -66,7 +59,7 @@ test_that("lgb.get.eval.result() should throw an informative error for incorrect
, metric = "l2"
, min_data = 1L
, learning_rate = 1.0
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS
)
, data = dtrain
......@@ -101,7 +94,7 @@ test_that("lgb.get.eval.result() should throw an informative error for incorrect
, metric = "l2"
, min_data = 1L
, learning_rate = 1.0
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
)
, data = dtrain
, nrounds = 5L
......@@ -135,7 +128,7 @@ test_that("lgb.load() gives the expected error messages given different incorrec
objective = "binary"
, num_leaves = 4L
, learning_rate = 1.0
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
)
, nrounds = 2L
)
......@@ -186,7 +179,7 @@ test_that("Loading a Booster from a text file works", {
, metric = c("mape", "average_precision")
, learning_rate = 1.0
, objective = "binary"
, verbosity = VERBOSITY
, verbosity = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS
)
bst <- lightgbm(
......@@ -237,7 +230,7 @@ test_that("boosters with linear models at leaves can be written to text file and
data = dtrain
, nrounds = 10L
, params = params
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
)
expect_true(lgb.is.Booster(bst))
......@@ -271,7 +264,7 @@ test_that("Loading a Booster from a string works", {
num_leaves = 4L
, learning_rate = 1.0
, objective = "binary"
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS
)
, nrounds = 2L
......@@ -307,7 +300,7 @@ test_that("Saving a large model to string should work", {
, num_threads = .LGB_MAX_THREADS
)
, nrounds = 500L
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
)
pred <- predict(bst, train$data)
......@@ -351,7 +344,7 @@ test_that("Saving a large model to JSON should work", {
, num_threads = .LGB_MAX_THREADS
)
, nrounds = 200L
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
)
model_json <- bst$dump_model()
......@@ -378,7 +371,7 @@ test_that("If a string and a file are both passed to lgb.load() the file is used
num_leaves = 4L
, learning_rate = 1.0
, objective = "binary"
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS
)
, nrounds = 2L
......@@ -413,7 +406,7 @@ test_that("Creating a Booster from a Dataset should work", {
bst <- Booster$new(
params = list(
objective = "binary"
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS
),
train_set = dtrain
......@@ -435,7 +428,7 @@ test_that("Creating a Booster from a Dataset with an existing predictor should w
num_leaves = 4L
, learning_rate = 1.0
, objective = "binary"
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS
)
, nrounds = nrounds
......@@ -449,7 +442,7 @@ test_that("Creating a Booster from a Dataset with an existing predictor should w
bst_from_ds <- Booster$new(
train_set = dtest
, params = list(
verbose = VERBOSITY
verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS
)
)
......@@ -473,7 +466,7 @@ test_that("Booster$eval() should work on a Dataset stored in a binary file", {
objective = "regression"
, metric = "l2"
, num_leaves = 4L
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS
)
, data = dtrain
......@@ -504,14 +497,14 @@ test_that("Booster$eval() should work on a Dataset stored in a binary file", {
eval_from_file <- bst$eval(
data = lgb.Dataset(
data = test_file
, params = list(verbose = VERBOSITY, num_threads = .LGB_MAX_THREADS)
, params = list(verbose = .LGB_VERBOSITY, num_threads = .LGB_MAX_THREADS)
)$construct()
, name = "test"
)
expect_true(abs(eval_in_mem[[1L]][["value"]] - 0.1744423) < TOLERANCE)
expect_true(abs(eval_in_mem[[1L]][["value"]] - 0.1744423) < .LGB_NUMERIC_TOLERANCE)
# refer to https://github.com/microsoft/LightGBM/issues/4680
if (isTRUE(ON_WINDOWS)) {
if (isTRUE(.LGB_ON_WINDOWS)) {
expect_equal(eval_in_mem, eval_from_file)
} else {
expect_identical(eval_in_mem, eval_from_file)
......@@ -532,7 +525,7 @@ test_that("Booster$rollback_one_iter() should work as expected", {
num_leaves = 4L
, learning_rate = 1.0
, objective = "binary"
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS
)
, nrounds = nrounds
......@@ -567,7 +560,7 @@ test_that("Booster$update() passing a train_set works as expected", {
num_leaves = 4L
, learning_rate = 1.0
, objective = "binary"
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS
)
, nrounds = nrounds
......@@ -578,7 +571,7 @@ test_that("Booster$update() passing a train_set works as expected", {
train_set = Dataset$new(
data = agaricus.train$data
, label = agaricus.train$label
, params = list(verbose = VERBOSITY)
, params = list(verbose = .LGB_VERBOSITY)
)
)
expect_true(lgb.is.Booster(bst))
......@@ -592,7 +585,7 @@ test_that("Booster$update() passing a train_set works as expected", {
num_leaves = 4L
, learning_rate = 1.0
, objective = "binary"
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS
)
, nrounds = nrounds + 1L
......@@ -618,7 +611,7 @@ test_that("Booster$update() throws an informative error if you provide a non-Dat
num_leaves = 4L
, learning_rate = 1.0
, objective = "binary"
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS
)
, nrounds = nrounds
......@@ -646,7 +639,7 @@ test_that("Booster should store parameters and Booster$reset_parameter() should
, metric = c("multi_logloss", "multi_error")
, boosting = "gbdt"
, num_class = 5L
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS
)
bst <- Booster$new(
......@@ -674,7 +667,7 @@ test_that("Booster$params should include dataset params, before and after Booste
objective = "binary"
, max_depth = 4L
, bagging_fraction = 0.8
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS
)
bst <- Booster$new(
......@@ -687,7 +680,7 @@ test_that("Booster$params should include dataset params, before and after Booste
objective = "binary"
, max_depth = 4L
, bagging_fraction = 0.8
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS
, max_bin = 17L
)
......@@ -699,7 +692,7 @@ test_that("Booster$params should include dataset params, before and after Booste
objective = "binary"
, max_depth = 4L
, bagging_fraction = 0.9
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS
, max_bin = 17L
)
......@@ -718,7 +711,7 @@ test_that("Saving a model with different feature importance types works", {
num_leaves = 4L
, learning_rate = 1.0
, objective = "binary"
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS
)
, nrounds = 2L
......@@ -774,7 +767,7 @@ test_that("Saving a model with unknown importance type fails", {
num_leaves = 4L
, learning_rate = 1.0
, objective = "binary"
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS
)
, nrounds = 2L
......@@ -815,7 +808,7 @@ test_that("all parameters are stored correctly with save_model_to_string()", {
)
, data = dtrain
, nrounds = nrounds
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
)
model_str <- bst$save_model_to_string()
......@@ -832,7 +825,7 @@ test_that("all parameters are stored correctly with save_model_to_string()", {
expect_equal(sum(params_in_file == "[objective: regression]"), 1L)
expect_equal(sum(startsWith(params_in_file, "[verbosity:")), 1L)
expect_equal(sum(params_in_file == sprintf("[verbosity: %i]", VERBOSITY)), 1L)
expect_equal(sum(params_in_file == sprintf("[verbosity: %i]", .LGB_VERBOSITY)), 1L)
# early stopping should be off by default
expect_equal(sum(startsWith(params_in_file, "[early_stopping_round:")), 1L)
......@@ -879,7 +872,7 @@ test_that("early_stopping, num_iterations are stored correctly in model string e
, valids = list(
"random_valid" = dvalid
)
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
)
model_str <- bst$save_model_to_string()
......@@ -911,7 +904,7 @@ test_that("Booster: method calls Booster with a null handle should raise an info
, num_threads = .LGB_MAX_THREADS
)
, data = dtrain
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, nrounds = 5L
, valids = list(
train = dtrain
......@@ -987,7 +980,7 @@ test_that("Booster$new() using a Dataset with a null handle should raise an info
bst <- Booster$new(
train_set = dtrain
, params = list(
verbose = VERBOSITY
verbose = .LGB_VERBOSITY
)
)
}, regexp = "Attempting to create a Dataset without any raw data")
......@@ -1098,7 +1091,7 @@ test_that("lgb.cv() correctly handles passing through params to the model file",
, n_iter = n_iter
, early_stopping_round = early_stopping_round
, n_iter_no_change = n_iter_no_change
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS
)
......@@ -1108,7 +1101,7 @@ test_that("lgb.cv() correctly handles passing through params to the model file",
, nrounds = nrounds_kwarg
, early_stopping_rounds = early_stopping_round_kwarg
, nfold = 3L
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
)
for (bst in cv_bst$boosters) {
......@@ -1143,7 +1136,7 @@ test_that("params (including dataset params) should be stored in .rds file for B
objective = "binary"
, max_depth = 4L
, bagging_fraction = 0.8
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS
)
bst <- Booster$new(
......@@ -1160,7 +1153,7 @@ test_that("params (including dataset params) should be stored in .rds file for B
objective = "binary"
, max_depth = 4L
, bagging_fraction = 0.8
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS
, max_bin = 17L
)
......@@ -1180,7 +1173,7 @@ test_that("params (including dataset params) should be stored in .rds file for B
objective = "binary"
, max_depth = 4L
, bagging_fraction = 0.8
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS
)
bst <- Booster$new(
......@@ -1197,7 +1190,7 @@ test_that("params (including dataset params) should be stored in .rds file for B
objective = "binary"
, max_depth = 4L
, bagging_fraction = 0.8
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS
, max_bin = 17L
)
......@@ -1212,7 +1205,7 @@ test_that("Handle is automatically restored when calling predict", {
, nrounds = 5L
, obj = "binary"
, params = list(
verbose = VERBOSITY
verbose = .LGB_VERBOSITY
)
, num_threads = .LGB_MAX_THREADS
)
......@@ -1236,7 +1229,7 @@ test_that("boosters with linear models at leaves work with saveRDS.lgb.Booster a
params <- list(
objective = "regression"
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, metric = "mse"
, seed = 0L
, num_leaves = 2L
......@@ -1276,7 +1269,7 @@ test_that("boosters with linear models at leaves can be written to RDS and re-lo
params <- list(
objective = "regression"
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, metric = "mse"
, seed = 0L
, num_leaves = 2L
......@@ -1386,7 +1379,7 @@ test_that("Booster's print, show, and summary work correctly", {
min_data_in_bin = 1L
)
)
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, nrounds = 5L
)
.check_methods_work(model)
......@@ -1398,7 +1391,7 @@ test_that("Booster's print, show, and summary work correctly", {
as.matrix(iris[, -5L])
, label = as.numeric(factor(iris$Species)) - 1.0
)
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, nrounds = 5L
)
.check_methods_work(model)
......@@ -1431,7 +1424,7 @@ test_that("Booster's print, show, and summary work correctly", {
)
, obj = .logregobj
, eval = .evalerror
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, nrounds = 5L
, params = list(num_threads = .LGB_MAX_THREADS)
)
......@@ -1454,7 +1447,7 @@ test_that("LGBM_BoosterGetNumFeature_R returns correct outputs", {
min_data_in_bin = 1L
)
)
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, nrounds = 5L
)
ncols <- .Call(LGBM_BoosterGetNumFeature_R, model$.__enclos_env__$private$handle)
......@@ -1467,7 +1460,7 @@ test_that("LGBM_BoosterGetNumFeature_R returns correct outputs", {
as.matrix(iris[, -5L])
, label = as.numeric(factor(iris$Species)) - 1.0
)
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, nrounds = 5L
)
ncols <- .Call(LGBM_BoosterGetNumFeature_R, model$.__enclos_env__$private$handle)
......
# logger verbosity passed to LightGBM in these tests,
# read from environment variable LIGHTGBM_TEST_VERBOSITY (default "-1")
VERBOSITY <- as.integer(
Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1")
)
# Logistic (inverse-logit) function: maps real-valued x into (0, 1).
# Vectorized over `x`.
.sigmoid <- function(x) {
  neg_exp <- exp(-x)
  1.0 / (1.0 + neg_exp)
}
......@@ -30,7 +26,7 @@ test_that("lgb.intereprete works as expected for binary classification", {
, max_depth = -1L
, min_data_in_leaf = 1L
, min_sum_hessian_in_leaf = 1.0
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS
)
model <- lgb.train(
......@@ -83,7 +79,7 @@ test_that("lgb.intereprete works as expected for multiclass classification", {
, num_class = 3L
, learning_rate = 0.00001
, min_data = 1L
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS
)
model <- lgb.train(
......
# logger verbosity passed to LightGBM in these tests,
# read from environment variable LIGHTGBM_TEST_VERBOSITY (default "-1")
VERBOSITY <- as.integer(
Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1")
)
test_that("lgb.plot.importance() should run without error for well-formed inputs", {
data(agaricus.train, package = "lightgbm")
train <- agaricus.train
......@@ -13,7 +9,7 @@ test_that("lgb.plot.importance() should run without error for well-formed inputs
, max_depth = -1L
, min_data_in_leaf = 1L
, min_sum_hessian_in_leaf = 1.0
, verbosity = VERBOSITY
, verbosity = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS
)
model <- lgb.train(params, dtrain, 3L)
......
# logger verbosity passed to LightGBM in these tests,
# read from environment variable LIGHTGBM_TEST_VERBOSITY (default "-1")
VERBOSITY <- as.integer(
Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1")
)
# Logistic (inverse-logit) transform, vectorized: each element of `x`
# is mapped into the open interval (0, 1).
.sigmoid <- function(x) {
  1.0 / (exp(-x) + 1.0)
}
......@@ -30,7 +26,7 @@ test_that("lgb.plot.interepretation works as expected for binary classification"
, max_depth = -1L
, min_data_in_leaf = 1L
, min_sum_hessian_in_leaf = 1.0
, verbosity = VERBOSITY
, verbosity = .LGB_VERBOSITY
, num_threads = .LGB_MAX_THREADS
)
model <- lgb.train(
......@@ -87,7 +83,7 @@ test_that("lgb.plot.interepretation works as expected for multiclass classificat
params = params
, data = dtrain
, nrounds = 3L
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
)
num_trees <- 5L
tree_interpretation <- lgb.interprete(
......
# logger verbosity passed to LightGBM in these tests,
# read from environment variable LIGHTGBM_TEST_VERBOSITY (default "-1")
VERBOSITY <- as.integer(
Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1")
)
test_that("Gamma regression reacts on 'weight'", {
n <- 100L
set.seed(87L)
......@@ -17,7 +13,7 @@ test_that("Gamma regression reacts on 'weight'", {
params = params
, data = dtrain
, nrounds = 4L
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
)
pred_unweighted <- predict(bst, X_pred)
......@@ -31,7 +27,7 @@ test_that("Gamma regression reacts on 'weight'", {
params = params
, data = dtrain
, nrounds = 4L
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
)
pred_weighted_1 <- predict(bst, X_pred)
......@@ -45,7 +41,7 @@ test_that("Gamma regression reacts on 'weight'", {
params = params
, data = dtrain
, nrounds = 4L
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
)
pred_weighted_2 <- predict(bst, X_pred)
......@@ -59,7 +55,7 @@ test_that("Gamma regression reacts on 'weight'", {
params = params
, data = dtrain
, nrounds = 4L
, verbose = VERBOSITY
, verbose = .LGB_VERBOSITY
)
pred_weighted <- predict(bst, X_pred)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment