Unverified Commit 7dcbb8cd authored by James Lamb's avatar James Lamb Committed by GitHub
Browse files

[R-package] limit number of threads used in tests and examples (fixes #5987) (#5988)

parent 7d4d8975
...@@ -47,6 +47,7 @@ params <- list( ...@@ -47,6 +47,7 @@ params <- list(
, learning_rate = 0.1 , learning_rate = 0.1
, min_data_in_leaf = 1L , min_data_in_leaf = 1L
, min_sum_hessian_in_leaf = 1.0 , min_sum_hessian_in_leaf = 1.0
, num_threads = 2L
) )
model <- lgb.train( model <- lgb.train(
......
...@@ -58,6 +58,7 @@ params <- list( ...@@ -58,6 +58,7 @@ params <- list(
, max_depth = -1L , max_depth = -1L
, min_data_in_leaf = 1L , min_data_in_leaf = 1L
, min_sum_hessian_in_leaf = 1.0 , min_sum_hessian_in_leaf = 1.0
, num_threads = 2L
) )
model <- lgb.train( model <- lgb.train(
params = params params = params
......
...@@ -34,7 +34,9 @@ model <- lightgbm( ...@@ -34,7 +34,9 @@ model <- lightgbm(
, agaricus.train$label , agaricus.train$label
, params = list(objective = "binary") , params = list(objective = "binary")
, nrounds = 5L , nrounds = 5L
, verbose = 0) , verbose = 0
, num_threads = 2L
)
fname <- tempfile(fileext="rds") fname <- tempfile(fileext="rds")
saveRDS(model, fname) saveRDS(model, fname)
......
...@@ -33,6 +33,7 @@ params <- list( ...@@ -33,6 +33,7 @@ params <- list(
, metric = "l2" , metric = "l2"
, min_data = 1L , min_data = 1L
, learning_rate = 1.0 , learning_rate = 1.0
, num_threads = 2L
) )
valids <- list(test = dtest) valids <- list(test = dtest)
model <- lgb.train( model <- lgb.train(
......
...@@ -141,6 +141,7 @@ params <- list( ...@@ -141,6 +141,7 @@ params <- list(
, metric = "l2" , metric = "l2"
, min_data = 1L , min_data = 1L
, learning_rate = 1.0 , learning_rate = 1.0
, num_threads = 2L
) )
valids <- list(test = dtest) valids <- list(test = dtest)
model <- lgb.train( model <- lgb.train(
......
...@@ -132,6 +132,7 @@ params <- list( ...@@ -132,6 +132,7 @@ params <- list(
, metric = "l2" , metric = "l2"
, min_data = 1L , min_data = 1L
, learning_rate = 1.0 , learning_rate = 1.0
, num_threads = 2L
) )
valids <- list(test = dtest) valids <- list(test = dtest)
model <- lgb.train( model <- lgb.train(
......
...@@ -34,6 +34,7 @@ params <- list( ...@@ -34,6 +34,7 @@ params <- list(
, metric = "l2" , metric = "l2"
, min_data = 1L , min_data = 1L
, learning_rate = 1.0 , learning_rate = 1.0
, num_threads = 2L
) )
valids <- list(test = dtest) valids <- list(test = dtest)
model <- lgb.train( model <- lgb.train(
......
...@@ -57,6 +57,7 @@ params <- list( ...@@ -57,6 +57,7 @@ params <- list(
, metric = "l2" , metric = "l2"
, min_data = 1L , min_data = 1L
, learning_rate = 1.0 , learning_rate = 1.0
, num_threads = 2L
) )
valids <- list(test = dtest) valids <- list(test = dtest)
model <- lgb.train( model <- lgb.train(
......
# ref for this file:
#
# * https://r-pkgs.org/testing-design.html#testthat-setup-files
#
# LightGBM-internal fix to comply with the CRAN policy of only using up to
# 2 threads in tests and examples.
#
# per https://cran.r-project.org/web/packages/policies.html
#
# > If running a package uses multiple threads/cores it must never use more than two simultaneously:
# > the check farm is a shared resource and will typically be running many checks simultaneously.
#
# Maximum thread count passed as `num_threads` throughout the test suite.
# Defined in a testthat setup file so it is in scope for every test file.
.LGB_MAX_THREADS <- 2L
...@@ -14,6 +14,7 @@ test_that("Predictor$finalize() should not fail", { ...@@ -14,6 +14,7 @@ test_that("Predictor$finalize() should not fail", {
data = dtrain data = dtrain
, params = list( , params = list(
objective = "regression" objective = "regression"
, num_threads = .LGB_MAX_THREADS
) )
, verbose = VERBOSITY , verbose = VERBOSITY
, nrounds = 3L , nrounds = 3L
...@@ -42,6 +43,7 @@ test_that("predictions do not fail for integer input", { ...@@ -42,6 +43,7 @@ test_that("predictions do not fail for integer input", {
data = dtrain data = dtrain
, params = list( , params = list(
objective = "regression" objective = "regression"
, num_threads = .LGB_MAX_THREADS
) )
, verbose = VERBOSITY , verbose = VERBOSITY
, nrounds = 3L , nrounds = 3L
...@@ -77,6 +79,7 @@ test_that("start_iteration works correctly", { ...@@ -77,6 +79,7 @@ test_that("start_iteration works correctly", {
, learning_rate = 0.6 , learning_rate = 0.6
, objective = "binary" , objective = "binary"
, verbosity = VERBOSITY , verbosity = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, nrounds = 50L , nrounds = 50L
, valids = list("test" = dtest) , valids = list("test" = dtest)
...@@ -126,7 +129,7 @@ test_that("Feature contributions from sparse inputs produce sparse outputs", { ...@@ -126,7 +129,7 @@ test_that("Feature contributions from sparse inputs produce sparse outputs", {
, obj = "regression" , obj = "regression"
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = VERBOSITY
, params = list(min_data_in_leaf = 5L) , params = list(min_data_in_leaf = 5L, num_threads = .LGB_MAX_THREADS)
) )
pred_dense <- predict(bst, X, type = "contrib") pred_dense <- predict(bst, X, type = "contrib")
...@@ -157,7 +160,7 @@ test_that("Sparse feature contribution predictions do not take inputs with wrong ...@@ -157,7 +160,7 @@ test_that("Sparse feature contribution predictions do not take inputs with wrong
, obj = "regression" , obj = "regression"
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = VERBOSITY
, params = list(min_data_in_leaf = 5L) , params = list(min_data_in_leaf = 5L, num_threads = .LGB_MAX_THREADS)
) )
X_wrong <- X[, c(1L:10L, 1L:10L)] X_wrong <- X[, c(1L:10L, 1L:10L)]
...@@ -187,7 +190,7 @@ test_that("Feature contribution predictions do not take non-general CSR or CSC i ...@@ -187,7 +190,7 @@ test_that("Feature contribution predictions do not take non-general CSR or CSC i
, obj = "regression" , obj = "regression"
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = VERBOSITY
, params = list(min_data_in_leaf = 5L) , params = list(min_data_in_leaf = 5L, num_threads = .LGB_MAX_THREADS)
) )
expect_error(predict(bst, SmatC, type = "contrib")) expect_error(predict(bst, SmatC, type = "contrib"))
...@@ -211,6 +214,7 @@ test_that("predict() params should override keyword argument for raw-score predi ...@@ -211,6 +214,7 @@ test_that("predict() params should override keyword argument for raw-score predi
objective = "binary" objective = "binary"
, min_data_in_leaf = 1L , min_data_in_leaf = 1L
, seed = 708L , seed = 708L
, num_threads = .LGB_MAX_THREADS
) )
, nrounds = 10L , nrounds = 10L
, verbose = VERBOSITY , verbose = VERBOSITY
...@@ -262,6 +266,7 @@ test_that("predict() params should override keyword argument for leaf-index pred ...@@ -262,6 +266,7 @@ test_that("predict() params should override keyword argument for leaf-index pred
objective = "regression" objective = "regression"
, min_data_in_leaf = 1L , min_data_in_leaf = 1L
, seed = 708L , seed = 708L
, num_threads = .LGB_MAX_THREADS
) )
, nrounds = 10L , nrounds = 10L
, verbose = VERBOSITY , verbose = VERBOSITY
...@@ -315,6 +320,7 @@ test_that("predict() params should override keyword argument for feature contrib ...@@ -315,6 +320,7 @@ test_that("predict() params should override keyword argument for feature contrib
objective = "regression" objective = "regression"
, min_data_in_leaf = 1L , min_data_in_leaf = 1L
, seed = 708L , seed = 708L
, num_threads = .LGB_MAX_THREADS
) )
, nrounds = 10L , nrounds = 10L
, verbose = VERBOSITY , verbose = VERBOSITY
...@@ -426,7 +432,7 @@ test_that("predict() keeps row names from data (regression)", { ...@@ -426,7 +432,7 @@ test_that("predict() keeps row names from data (regression)", {
, obj = "regression" , obj = "regression"
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = VERBOSITY
, params = list(min_data_in_leaf = 1L) , params = list(min_data_in_leaf = 1L, num_threads = .LGB_MAX_THREADS)
) )
.check_all_row_name_expectations(bst, X) .check_all_row_name_expectations(bst, X)
}) })
...@@ -442,6 +448,7 @@ test_that("predict() keeps row names from data (binary classification)", { ...@@ -442,6 +448,7 @@ test_that("predict() keeps row names from data (binary classification)", {
, obj = "binary" , obj = "binary"
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = VERBOSITY
, params = list(num_threads = .LGB_MAX_THREADS)
) )
.check_all_row_name_expectations(bst, X) .check_all_row_name_expectations(bst, X)
}) })
...@@ -455,7 +462,7 @@ test_that("predict() keeps row names from data (multi-class classification)", { ...@@ -455,7 +462,7 @@ test_that("predict() keeps row names from data (multi-class classification)", {
bst <- lgb.train( bst <- lgb.train(
data = dtrain data = dtrain
, obj = "multiclass" , obj = "multiclass"
, params = list(num_class = 3L) , params = list(num_class = 3L, num_threads = .LGB_MAX_THREADS)
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = VERBOSITY
) )
...@@ -479,7 +486,7 @@ test_that("predictions for regression and binary classification are returned as ...@@ -479,7 +486,7 @@ test_that("predictions for regression and binary classification are returned as
, obj = "regression" , obj = "regression"
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = VERBOSITY
, params = list(min_data_in_leaf = 1L) , params = list(min_data_in_leaf = 1L, num_threads = .LGB_MAX_THREADS)
) )
pred <- predict(model, X) pred <- predict(model, X)
expect_true(is.vector(pred)) expect_true(is.vector(pred))
...@@ -497,6 +504,7 @@ test_that("predictions for regression and binary classification are returned as ...@@ -497,6 +504,7 @@ test_that("predictions for regression and binary classification are returned as
, obj = "binary" , obj = "binary"
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = VERBOSITY
, params = list(num_threads = .LGB_MAX_THREADS)
) )
pred <- predict(model, X) pred <- predict(model, X)
expect_true(is.vector(pred)) expect_true(is.vector(pred))
...@@ -516,7 +524,7 @@ test_that("predictions for multiclass classification are returned as matrix", { ...@@ -516,7 +524,7 @@ test_that("predictions for multiclass classification are returned as matrix", {
, obj = "multiclass" , obj = "multiclass"
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = VERBOSITY
, params = list(num_class = 3L) , params = list(num_class = 3L, num_threads = .LGB_MAX_THREADS)
) )
pred <- predict(model, X) pred <- predict(model, X)
expect_true(is.matrix(pred)) expect_true(is.matrix(pred))
...@@ -533,7 +541,7 @@ test_that("Single-row predictions are identical to multi-row ones", { ...@@ -533,7 +541,7 @@ test_that("Single-row predictions are identical to multi-row ones", {
X <- as.matrix(mtcars[, -1L]) X <- as.matrix(mtcars[, -1L])
y <- mtcars[, 1L] y <- mtcars[, 1L]
dtrain <- lgb.Dataset(X, label = y, params = list(max_bin = 5L)) dtrain <- lgb.Dataset(X, label = y, params = list(max_bin = 5L))
params <- list(min_data_in_leaf = 2L) params <- list(min_data_in_leaf = 2L, num_threads = .LGB_MAX_THREADS)
model <- lgb.train( model <- lgb.train(
params = params params = params
, data = dtrain , data = dtrain
...@@ -594,7 +602,7 @@ test_that("Fast-predict configuration accepts non-default prediction types", { ...@@ -594,7 +602,7 @@ test_that("Fast-predict configuration accepts non-default prediction types", {
X <- as.matrix(mtcars[, -1L]) X <- as.matrix(mtcars[, -1L])
y <- mtcars[, 1L] y <- mtcars[, 1L]
dtrain <- lgb.Dataset(X, label = y, params = list(max_bin = 5L)) dtrain <- lgb.Dataset(X, label = y, params = list(max_bin = 5L))
params <- list(min_data_in_leaf = 2L) params <- list(min_data_in_leaf = 2L, num_threads = .LGB_MAX_THREADS)
model <- lgb.train( model <- lgb.train(
params = params params = params
, data = dtrain , data = dtrain
...@@ -624,7 +632,7 @@ test_that("Fast-predict configuration does not block other prediction types", { ...@@ -624,7 +632,7 @@ test_that("Fast-predict configuration does not block other prediction types", {
X <- as.matrix(mtcars[, -1L]) X <- as.matrix(mtcars[, -1L])
y <- mtcars[, 1L] y <- mtcars[, 1L]
dtrain <- lgb.Dataset(X, label = y, params = list(max_bin = 5L)) dtrain <- lgb.Dataset(X, label = y, params = list(max_bin = 5L))
params <- list(min_data_in_leaf = 2L) params <- list(min_data_in_leaf = 2L, num_threads = .LGB_MAX_THREADS)
model <- lgb.train( model <- lgb.train(
params = params params = params
, data = dtrain , data = dtrain
...@@ -661,6 +669,7 @@ test_that("predict type='class' returns predicted class for classification objec ...@@ -661,6 +669,7 @@ test_that("predict type='class' returns predicted class for classification objec
, obj = "binary" , obj = "binary"
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = VERBOSITY
, params = list(num_threads = .LGB_MAX_THREADS)
) )
pred <- predict(bst, X, type = "class") pred <- predict(bst, X, type = "class")
expect_true(all(pred %in% c(0L, 1L))) expect_true(all(pred %in% c(0L, 1L)))
...@@ -674,7 +683,7 @@ test_that("predict type='class' returns predicted class for classification objec ...@@ -674,7 +683,7 @@ test_that("predict type='class' returns predicted class for classification objec
, obj = "multiclass" , obj = "multiclass"
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = VERBOSITY
, params = list(num_class = 3L) , params = list(num_class = 3L, num_threads = .LGB_MAX_THREADS)
) )
pred <- predict(model, X, type = "class") pred <- predict(model, X, type = "class")
expect_true(all(pred %in% c(0L, 1L, 2L))) expect_true(all(pred %in% c(0L, 1L, 2L)))
...@@ -690,6 +699,7 @@ test_that("predict type='class' returns values in the target's range for regress ...@@ -690,6 +699,7 @@ test_that("predict type='class' returns values in the target's range for regress
, obj = "regression" , obj = "regression"
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = VERBOSITY
, params = list(num_threads = .LGB_MAX_THREADS)
) )
pred <- predict(bst, X, type = "class") pred <- predict(bst, X, type = "class")
expect_true(!any(pred %in% c(0.0, 1.0))) expect_true(!any(pred %in% c(0.0, 1.0)))
......
This diff is collapsed.
...@@ -39,6 +39,7 @@ param <- list( ...@@ -39,6 +39,7 @@ param <- list(
, objective = logregobj , objective = logregobj
, metric = "auc" , metric = "auc"
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
num_round <- 10L num_round <- 10L
...@@ -54,6 +55,7 @@ test_that("using a custom objective, custom eval, and no other metrics works", { ...@@ -54,6 +55,7 @@ test_that("using a custom objective, custom eval, and no other metrics works", {
num_leaves = 8L num_leaves = 8L
, learning_rate = 1.0 , learning_rate = 1.0
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, data = dtrain , data = dtrain
, nrounds = 4L , nrounds = 4L
......
...@@ -133,7 +133,7 @@ test_that("Dataset$set_reference() updates categorical_feature, colnames, and pr ...@@ -133,7 +133,7 @@ test_that("Dataset$set_reference() updates categorical_feature, colnames, and pr
dtrain$construct() dtrain$construct()
bst <- Booster$new( bst <- Booster$new(
train_set = dtrain train_set = dtrain
, params = list(verbose = -1L) , params = list(verbose = -1L, num_threads = .LGB_MAX_THREADS)
) )
dtrain$.__enclos_env__$private$predictor <- bst$to_predictor() dtrain$.__enclos_env__$private$predictor <- bst$to_predictor()
...@@ -394,6 +394,7 @@ test_that("lgb.Dataset: should be able to run lgb.train() immediately after usin ...@@ -394,6 +394,7 @@ test_that("lgb.Dataset: should be able to run lgb.train() immediately after usin
, num_leaves = 5L , num_leaves = 5L
, learning_rate = 1.0 , learning_rate = 1.0
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
# should be able to train right away # should be able to train right away
...@@ -429,6 +430,7 @@ test_that("lgb.Dataset: should be able to run lgb.cv() immediately after using l ...@@ -429,6 +430,7 @@ test_that("lgb.Dataset: should be able to run lgb.cv() immediately after using l
, learning_rate = 1.0 , learning_rate = 1.0
, num_iterations = 5L , num_iterations = 5L
, verbosity = VERBOSITY , verbosity = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
# should be able to train right away # should be able to train right away
......
...@@ -27,6 +27,7 @@ test_that("learning-to-rank with lgb.train() works as expected", { ...@@ -27,6 +27,7 @@ test_that("learning-to-rank with lgb.train() works as expected", {
, lambdarank_truncation_level = 3L , lambdarank_truncation_level = 3L
, learning_rate = 0.001 , learning_rate = 0.001
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
model <- lgb.train( model <- lgb.train(
params = params params = params
...@@ -91,6 +92,7 @@ test_that("learning-to-rank with lgb.cv() works as expected", { ...@@ -91,6 +92,7 @@ test_that("learning-to-rank with lgb.cv() works as expected", {
, min_data = 1L , min_data = 1L
, learning_rate = 0.01 , learning_rate = 0.01
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
nfold <- 4L nfold <- 4L
nrounds <- 10L nrounds <- 10L
......
...@@ -13,6 +13,7 @@ test_that("Booster$finalize() should not fail", { ...@@ -13,6 +13,7 @@ test_that("Booster$finalize() should not fail", {
data = dtrain data = dtrain
, params = list( , params = list(
objective = "regression" objective = "regression"
, num_threads = .LGB_MAX_THREADS
) )
, verbose = VERBOSITY , verbose = VERBOSITY
, nrounds = 3L , nrounds = 3L
...@@ -66,6 +67,7 @@ test_that("lgb.get.eval.result() should throw an informative error for incorrect ...@@ -66,6 +67,7 @@ test_that("lgb.get.eval.result() should throw an informative error for incorrect
, min_data = 1L , min_data = 1L
, learning_rate = 1.0 , learning_rate = 1.0
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, data = dtrain , data = dtrain
, nrounds = 5L , nrounds = 5L
...@@ -185,6 +187,7 @@ test_that("Loading a Booster from a text file works", { ...@@ -185,6 +187,7 @@ test_that("Loading a Booster from a text file works", {
, learning_rate = 1.0 , learning_rate = 1.0
, objective = "binary" , objective = "binary"
, verbosity = VERBOSITY , verbosity = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
bst <- lightgbm( bst <- lightgbm(
data = as.matrix(train$data) data = as.matrix(train$data)
...@@ -227,6 +230,7 @@ test_that("boosters with linear models at leaves can be written to text file and ...@@ -227,6 +230,7 @@ test_that("boosters with linear models at leaves can be written to text file and
, metric = "mse" , metric = "mse"
, seed = 0L , seed = 0L
, num_leaves = 2L , num_leaves = 2L
, num_threads = .LGB_MAX_THREADS
) )
bst <- lgb.train( bst <- lgb.train(
...@@ -268,6 +272,7 @@ test_that("Loading a Booster from a string works", { ...@@ -268,6 +272,7 @@ test_that("Loading a Booster from a string works", {
, learning_rate = 1.0 , learning_rate = 1.0
, objective = "binary" , objective = "binary"
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, nrounds = 2L , nrounds = 2L
) )
...@@ -299,6 +304,7 @@ test_that("Saving a large model to string should work", { ...@@ -299,6 +304,7 @@ test_that("Saving a large model to string should work", {
num_leaves = 100L num_leaves = 100L
, learning_rate = 0.01 , learning_rate = 0.01
, objective = "binary" , objective = "binary"
, num_threads = .LGB_MAX_THREADS
) )
, nrounds = 500L , nrounds = 500L
, verbose = VERBOSITY , verbose = VERBOSITY
...@@ -342,6 +348,7 @@ test_that("Saving a large model to JSON should work", { ...@@ -342,6 +348,7 @@ test_that("Saving a large model to JSON should work", {
num_leaves = 100L num_leaves = 100L
, learning_rate = 0.01 , learning_rate = 0.01
, objective = "binary" , objective = "binary"
, num_threads = .LGB_MAX_THREADS
) )
, nrounds = 200L , nrounds = 200L
, verbose = VERBOSITY , verbose = VERBOSITY
...@@ -372,6 +379,7 @@ test_that("If a string and a file are both passed to lgb.load() the file is used ...@@ -372,6 +379,7 @@ test_that("If a string and a file are both passed to lgb.load() the file is used
, learning_rate = 1.0 , learning_rate = 1.0
, objective = "binary" , objective = "binary"
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, nrounds = 2L , nrounds = 2L
) )
...@@ -406,6 +414,7 @@ test_that("Creating a Booster from a Dataset should work", { ...@@ -406,6 +414,7 @@ test_that("Creating a Booster from a Dataset should work", {
params = list( params = list(
objective = "binary" objective = "binary"
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
), ),
train_set = dtrain train_set = dtrain
) )
...@@ -427,6 +436,7 @@ test_that("Creating a Booster from a Dataset with an existing predictor should w ...@@ -427,6 +436,7 @@ test_that("Creating a Booster from a Dataset with an existing predictor should w
, learning_rate = 1.0 , learning_rate = 1.0
, objective = "binary" , objective = "binary"
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, nrounds = nrounds , nrounds = nrounds
) )
...@@ -440,6 +450,7 @@ test_that("Creating a Booster from a Dataset with an existing predictor should w ...@@ -440,6 +450,7 @@ test_that("Creating a Booster from a Dataset with an existing predictor should w
train_set = dtest train_set = dtest
, params = list( , params = list(
verbose = VERBOSITY verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
) )
expect_true(lgb.is.Booster(bst)) expect_true(lgb.is.Booster(bst))
...@@ -463,6 +474,7 @@ test_that("Booster$eval() should work on a Dataset stored in a binary file", { ...@@ -463,6 +474,7 @@ test_that("Booster$eval() should work on a Dataset stored in a binary file", {
, metric = "l2" , metric = "l2"
, num_leaves = 4L , num_leaves = 4L
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, data = dtrain , data = dtrain
, nrounds = 2L , nrounds = 2L
...@@ -492,7 +504,7 @@ test_that("Booster$eval() should work on a Dataset stored in a binary file", { ...@@ -492,7 +504,7 @@ test_that("Booster$eval() should work on a Dataset stored in a binary file", {
eval_from_file <- bst$eval( eval_from_file <- bst$eval(
data = lgb.Dataset( data = lgb.Dataset(
data = test_file data = test_file
, params = list(verbose = VERBOSITY) , params = list(verbose = VERBOSITY, num_threads = .LGB_MAX_THREADS)
)$construct() )$construct()
, name = "test" , name = "test"
) )
...@@ -521,6 +533,7 @@ test_that("Booster$rollback_one_iter() should work as expected", { ...@@ -521,6 +533,7 @@ test_that("Booster$rollback_one_iter() should work as expected", {
, learning_rate = 1.0 , learning_rate = 1.0
, objective = "binary" , objective = "binary"
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, nrounds = nrounds , nrounds = nrounds
) )
...@@ -555,6 +568,7 @@ test_that("Booster$update() passing a train_set works as expected", { ...@@ -555,6 +568,7 @@ test_that("Booster$update() passing a train_set works as expected", {
, learning_rate = 1.0 , learning_rate = 1.0
, objective = "binary" , objective = "binary"
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, nrounds = nrounds , nrounds = nrounds
) )
...@@ -579,6 +593,7 @@ test_that("Booster$update() passing a train_set works as expected", { ...@@ -579,6 +593,7 @@ test_that("Booster$update() passing a train_set works as expected", {
, learning_rate = 1.0 , learning_rate = 1.0
, objective = "binary" , objective = "binary"
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, nrounds = nrounds + 1L , nrounds = nrounds + 1L
) )
...@@ -604,6 +619,7 @@ test_that("Booster$update() throws an informative error if you provide a non-Dat ...@@ -604,6 +619,7 @@ test_that("Booster$update() throws an informative error if you provide a non-Dat
, learning_rate = 1.0 , learning_rate = 1.0
, objective = "binary" , objective = "binary"
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, nrounds = nrounds , nrounds = nrounds
) )
...@@ -631,6 +647,7 @@ test_that("Booster should store parameters and Booster$reset_parameter() should ...@@ -631,6 +647,7 @@ test_that("Booster should store parameters and Booster$reset_parameter() should
, boosting = "gbdt" , boosting = "gbdt"
, num_class = 5L , num_class = 5L
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
bst <- Booster$new( bst <- Booster$new(
params = params params = params
...@@ -658,6 +675,7 @@ test_that("Booster$params should include dataset params, before and after Booste ...@@ -658,6 +675,7 @@ test_that("Booster$params should include dataset params, before and after Booste
, max_depth = 4L , max_depth = 4L
, bagging_fraction = 0.8 , bagging_fraction = 0.8
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
bst <- Booster$new( bst <- Booster$new(
params = params params = params
...@@ -670,6 +688,7 @@ test_that("Booster$params should include dataset params, before and after Booste ...@@ -670,6 +688,7 @@ test_that("Booster$params should include dataset params, before and after Booste
, max_depth = 4L , max_depth = 4L
, bagging_fraction = 0.8 , bagging_fraction = 0.8
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
, max_bin = 17L , max_bin = 17L
) )
) )
...@@ -681,6 +700,7 @@ test_that("Booster$params should include dataset params, before and after Booste ...@@ -681,6 +700,7 @@ test_that("Booster$params should include dataset params, before and after Booste
, max_depth = 4L , max_depth = 4L
, bagging_fraction = 0.9 , bagging_fraction = 0.9
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
, max_bin = 17L , max_bin = 17L
) )
expect_identical(ret_bst$params, expected_params) expect_identical(ret_bst$params, expected_params)
...@@ -699,6 +719,7 @@ test_that("Saving a model with different feature importance types works", { ...@@ -699,6 +719,7 @@ test_that("Saving a model with different feature importance types works", {
, learning_rate = 1.0 , learning_rate = 1.0
, objective = "binary" , objective = "binary"
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, nrounds = 2L , nrounds = 2L
) )
...@@ -754,6 +775,7 @@ test_that("Saving a model with unknown importance type fails", { ...@@ -754,6 +775,7 @@ test_that("Saving a model with unknown importance type fails", {
, learning_rate = 1.0 , learning_rate = 1.0
, objective = "binary" , objective = "binary"
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, nrounds = 2L , nrounds = 2L
) )
...@@ -789,6 +811,7 @@ test_that("all parameters are stored correctly with save_model_to_string()", { ...@@ -789,6 +811,7 @@ test_that("all parameters are stored correctly with save_model_to_string()", {
params = list( params = list(
objective = "regression" objective = "regression"
, metric = "l2" , metric = "l2"
, num_threads = .LGB_MAX_THREADS
) )
, data = dtrain , data = dtrain
, nrounds = nrounds , nrounds = nrounds
...@@ -845,6 +868,7 @@ test_that("early_stopping, num_iterations are stored correctly in model string e ...@@ -845,6 +868,7 @@ test_that("early_stopping, num_iterations are stored correctly in model string e
, n_iter = n_iter , n_iter = n_iter
, early_stopping_round = early_stopping_round , early_stopping_round = early_stopping_round
, n_iter_no_change = n_iter_no_change , n_iter_no_change = n_iter_no_change
, num_threads = .LGB_MAX_THREADS
) )
bst <- lgb.train( bst <- lgb.train(
...@@ -884,6 +908,7 @@ test_that("Booster: method calls Booster with a null handle should raise an info ...@@ -884,6 +908,7 @@ test_that("Booster: method calls Booster with a null handle should raise an info
objective = "regression" objective = "regression"
, metric = "l2" , metric = "l2"
, num_leaves = 8L , num_leaves = 8L
, num_threads = .LGB_MAX_THREADS
) )
, data = dtrain , data = dtrain
, verbose = VERBOSITY , verbose = VERBOSITY
...@@ -1074,6 +1099,7 @@ test_that("lgb.cv() correctly handles passing through params to the model file", ...@@ -1074,6 +1099,7 @@ test_that("lgb.cv() correctly handles passing through params to the model file",
, early_stopping_round = early_stopping_round , early_stopping_round = early_stopping_round
, n_iter_no_change = n_iter_no_change , n_iter_no_change = n_iter_no_change
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
cv_bst <- lgb.cv( cv_bst <- lgb.cv(
...@@ -1118,6 +1144,7 @@ test_that("params (including dataset params) should be stored in .rds file for B ...@@ -1118,6 +1144,7 @@ test_that("params (including dataset params) should be stored in .rds file for B
, max_depth = 4L , max_depth = 4L
, bagging_fraction = 0.8 , bagging_fraction = 0.8
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
bst <- Booster$new( bst <- Booster$new(
params = params params = params
...@@ -1134,6 +1161,7 @@ test_that("params (including dataset params) should be stored in .rds file for B ...@@ -1134,6 +1161,7 @@ test_that("params (including dataset params) should be stored in .rds file for B
, max_depth = 4L , max_depth = 4L
, bagging_fraction = 0.8 , bagging_fraction = 0.8
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
, max_bin = 17L , max_bin = 17L
) )
) )
...@@ -1153,6 +1181,7 @@ test_that("params (including dataset params) should be stored in .rds file for B ...@@ -1153,6 +1181,7 @@ test_that("params (including dataset params) should be stored in .rds file for B
, max_depth = 4L , max_depth = 4L
, bagging_fraction = 0.8 , bagging_fraction = 0.8
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
bst <- Booster$new( bst <- Booster$new(
params = params params = params
...@@ -1169,6 +1198,7 @@ test_that("params (including dataset params) should be stored in .rds file for B ...@@ -1169,6 +1198,7 @@ test_that("params (including dataset params) should be stored in .rds file for B
, max_depth = 4L , max_depth = 4L
, bagging_fraction = 0.8 , bagging_fraction = 0.8
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
, max_bin = 17L , max_bin = 17L
) )
) )
...@@ -1184,6 +1214,7 @@ test_that("Handle is automatically restored when calling predict", { ...@@ -1184,6 +1214,7 @@ test_that("Handle is automatically restored when calling predict", {
, params = list( , params = list(
verbose = VERBOSITY verbose = VERBOSITY
) )
, num_threads = .LGB_MAX_THREADS
) )
bst_file <- tempfile(fileext = ".rds") bst_file <- tempfile(fileext = ".rds")
saveRDS(bst, file = bst_file) saveRDS(bst, file = bst_file)
...@@ -1209,6 +1240,7 @@ test_that("boosters with linear models at leaves work with saveRDS.lgb.Booster a ...@@ -1209,6 +1240,7 @@ test_that("boosters with linear models at leaves work with saveRDS.lgb.Booster a
, metric = "mse" , metric = "mse"
, seed = 0L , seed = 0L
, num_leaves = 2L , num_leaves = 2L
, num_threads = .LGB_MAX_THREADS
) )
bst <- lgb.train( bst <- lgb.train(
...@@ -1248,6 +1280,7 @@ test_that("boosters with linear models at leaves can be written to RDS and re-lo ...@@ -1248,6 +1280,7 @@ test_that("boosters with linear models at leaves can be written to RDS and re-lo
, metric = "mse" , metric = "mse"
, seed = 0L , seed = 0L
, num_leaves = 2L , num_leaves = 2L
, num_threads = .LGB_MAX_THREADS
) )
bst <- lgb.train( bst <- lgb.train(
...@@ -1344,6 +1377,7 @@ test_that("Booster's print, show, and summary work correctly", { ...@@ -1344,6 +1377,7 @@ test_that("Booster's print, show, and summary work correctly", {
params = list( params = list(
objective = "regression" objective = "regression"
, min_data_in_leaf = 1L , min_data_in_leaf = 1L
, num_threads = .LGB_MAX_THREADS
) )
, data = lgb.Dataset( , data = lgb.Dataset(
as.matrix(mtcars[, -1L]) as.matrix(mtcars[, -1L])
...@@ -1359,7 +1393,7 @@ test_that("Booster's print, show, and summary work correctly", { ...@@ -1359,7 +1393,7 @@ test_that("Booster's print, show, and summary work correctly", {
data("iris") data("iris")
model <- lgb.train( model <- lgb.train(
params = list(objective = "multiclass", num_class = 3L) params = list(objective = "multiclass", num_class = 3L, num_threads = .LGB_MAX_THREADS)
, data = lgb.Dataset( , data = lgb.Dataset(
as.matrix(iris[, -5L]) as.matrix(iris[, -5L])
, label = as.numeric(factor(iris$Species)) - 1.0 , label = as.numeric(factor(iris$Species)) - 1.0
...@@ -1399,6 +1433,7 @@ test_that("Booster's print, show, and summary work correctly", { ...@@ -1399,6 +1433,7 @@ test_that("Booster's print, show, and summary work correctly", {
, eval = .evalerror , eval = .evalerror
, verbose = VERBOSITY , verbose = VERBOSITY
, nrounds = 5L , nrounds = 5L
, params = list(num_threads = .LGB_MAX_THREADS)
) )
.check_methods_work(model) .check_methods_work(model)
...@@ -1410,6 +1445,7 @@ test_that("LGBM_BoosterGetNumFeature_R returns correct outputs", { ...@@ -1410,6 +1445,7 @@ test_that("LGBM_BoosterGetNumFeature_R returns correct outputs", {
params = list( params = list(
objective = "regression" objective = "regression"
, min_data_in_leaf = 1L , min_data_in_leaf = 1L
, num_threads = .LGB_MAX_THREADS
) )
, data = lgb.Dataset( , data = lgb.Dataset(
as.matrix(mtcars[, -1L]) as.matrix(mtcars[, -1L])
......
...@@ -31,6 +31,7 @@ test_that("lgb.intereprete works as expected for binary classification", { ...@@ -31,6 +31,7 @@ test_that("lgb.intereprete works as expected for binary classification", {
, min_data_in_leaf = 1L , min_data_in_leaf = 1L
, min_sum_hessian_in_leaf = 1.0 , min_sum_hessian_in_leaf = 1.0
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
model <- lgb.train( model <- lgb.train(
params = params params = params
...@@ -83,6 +84,7 @@ test_that("lgb.intereprete works as expected for multiclass classification", { ...@@ -83,6 +84,7 @@ test_that("lgb.intereprete works as expected for multiclass classification", {
, learning_rate = 0.00001 , learning_rate = 0.00001
, min_data = 1L , min_data = 1L
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
model <- lgb.train( model <- lgb.train(
params = params params = params
......
...@@ -14,6 +14,7 @@ test_that("lgb.plot.importance() should run without error for well-formed inputs ...@@ -14,6 +14,7 @@ test_that("lgb.plot.importance() should run without error for well-formed inputs
, min_data_in_leaf = 1L , min_data_in_leaf = 1L
, min_sum_hessian_in_leaf = 1.0 , min_sum_hessian_in_leaf = 1.0
, verbosity = VERBOSITY , verbosity = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
model <- lgb.train(params, dtrain, 3L) model <- lgb.train(params, dtrain, 3L)
tree_imp <- lgb.importance(model, percentage = TRUE) tree_imp <- lgb.importance(model, percentage = TRUE)
......
...@@ -31,6 +31,7 @@ test_that("lgb.plot.interepretation works as expected for binary classification" ...@@ -31,6 +31,7 @@ test_that("lgb.plot.interepretation works as expected for binary classification"
, min_data_in_leaf = 1L , min_data_in_leaf = 1L
, min_sum_hessian_in_leaf = 1.0 , min_sum_hessian_in_leaf = 1.0
, verbosity = VERBOSITY , verbosity = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
model <- lgb.train( model <- lgb.train(
params = params params = params
...@@ -80,6 +81,7 @@ test_that("lgb.plot.interepretation works as expected for multiclass classificat ...@@ -80,6 +81,7 @@ test_that("lgb.plot.interepretation works as expected for multiclass classificat
, num_class = 3L , num_class = 3L
, learning_rate = 0.00001 , learning_rate = 0.00001
, min_data = 1L , min_data = 1L
, num_threads = .LGB_MAX_THREADS
) )
model <- lgb.train( model <- lgb.train(
params = params params = params
......
...@@ -20,6 +20,7 @@ test_that("Feature penalties work properly", { ...@@ -20,6 +20,7 @@ test_that("Feature penalties work properly", {
, objective = "binary" , objective = "binary"
, feature_penalty = paste0(feature_penalties, collapse = ",") , feature_penalty = paste0(feature_penalties, collapse = ",")
, metric = "binary_error" , metric = "binary_error"
, num_threads = .LGB_MAX_THREADS
) )
, nrounds = 5L , nrounds = 5L
, verbose = -1L , verbose = -1L
...@@ -97,6 +98,7 @@ test_that("training should warn if you use 'dart' boosting, specified with 'boos ...@@ -97,6 +98,7 @@ test_that("training should warn if you use 'dart' boosting, specified with 'boos
, learning_rate = 0.05 , learning_rate = 0.05
, objective = "binary" , objective = "binary"
, metric = "binary_error" , metric = "binary_error"
, num_threads = .LGB_MAX_THREADS
) )
params[[boosting_param]] <- "dart" params[[boosting_param]] <- "dart"
expect_warning({ expect_warning({
......
...@@ -9,7 +9,7 @@ test_that("Gamma regression reacts on 'weight'", { ...@@ -9,7 +9,7 @@ test_that("Gamma regression reacts on 'weight'", {
y <- X[, 1L] + X[, 2L] + runif(n) y <- X[, 1L] + X[, 2L] + runif(n)
X_pred <- X[1L:5L, ] X_pred <- X[1L:5L, ]
params <- list(objective = "gamma") params <- list(objective = "gamma", num_threads = .LGB_MAX_THREADS)
# Unweighted # Unweighted
dtrain <- lgb.Dataset(X, label = y) dtrain <- lgb.Dataset(X, label = y)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment