Unverified commit 7dcbb8cd, authored by James Lamb, committed via GitHub
Browse files

[R-package] limit number of threads used in tests and examples (fixes #5987) (#5988)

parent 7d4d8975
...@@ -47,6 +47,7 @@ params <- list( ...@@ -47,6 +47,7 @@ params <- list(
, learning_rate = 0.1 , learning_rate = 0.1
, min_data_in_leaf = 1L , min_data_in_leaf = 1L
, min_sum_hessian_in_leaf = 1.0 , min_sum_hessian_in_leaf = 1.0
, num_threads = 2L
) )
model <- lgb.train( model <- lgb.train(
......
...@@ -58,6 +58,7 @@ params <- list( ...@@ -58,6 +58,7 @@ params <- list(
, max_depth = -1L , max_depth = -1L
, min_data_in_leaf = 1L , min_data_in_leaf = 1L
, min_sum_hessian_in_leaf = 1.0 , min_sum_hessian_in_leaf = 1.0
, num_threads = 2L
) )
model <- lgb.train( model <- lgb.train(
params = params params = params
......
...@@ -34,7 +34,9 @@ model <- lightgbm( ...@@ -34,7 +34,9 @@ model <- lightgbm(
, agaricus.train$label , agaricus.train$label
, params = list(objective = "binary") , params = list(objective = "binary")
, nrounds = 5L , nrounds = 5L
, verbose = 0) , verbose = 0
, num_threads = 2L
)
fname <- tempfile(fileext="rds") fname <- tempfile(fileext="rds")
saveRDS(model, fname) saveRDS(model, fname)
......
...@@ -33,6 +33,7 @@ params <- list( ...@@ -33,6 +33,7 @@ params <- list(
, metric = "l2" , metric = "l2"
, min_data = 1L , min_data = 1L
, learning_rate = 1.0 , learning_rate = 1.0
, num_threads = 2L
) )
valids <- list(test = dtest) valids <- list(test = dtest)
model <- lgb.train( model <- lgb.train(
......
...@@ -141,6 +141,7 @@ params <- list( ...@@ -141,6 +141,7 @@ params <- list(
, metric = "l2" , metric = "l2"
, min_data = 1L , min_data = 1L
, learning_rate = 1.0 , learning_rate = 1.0
, num_threads = 2L
) )
valids <- list(test = dtest) valids <- list(test = dtest)
model <- lgb.train( model <- lgb.train(
......
...@@ -132,6 +132,7 @@ params <- list( ...@@ -132,6 +132,7 @@ params <- list(
, metric = "l2" , metric = "l2"
, min_data = 1L , min_data = 1L
, learning_rate = 1.0 , learning_rate = 1.0
, num_threads = 2L
) )
valids <- list(test = dtest) valids <- list(test = dtest)
model <- lgb.train( model <- lgb.train(
......
...@@ -34,6 +34,7 @@ params <- list( ...@@ -34,6 +34,7 @@ params <- list(
, metric = "l2" , metric = "l2"
, min_data = 1L , min_data = 1L
, learning_rate = 1.0 , learning_rate = 1.0
, num_threads = 2L
) )
valids <- list(test = dtest) valids <- list(test = dtest)
model <- lgb.train( model <- lgb.train(
......
...@@ -57,6 +57,7 @@ params <- list( ...@@ -57,6 +57,7 @@ params <- list(
, metric = "l2" , metric = "l2"
, min_data = 1L , min_data = 1L
, learning_rate = 1.0 , learning_rate = 1.0
, num_threads = 2L
) )
valids <- list(test = dtest) valids <- list(test = dtest)
model <- lgb.train( model <- lgb.train(
......
# ref for this file:
#
# * https://r-pkgs.org/testing-design.html#testthat-setup-files
#
# LightGBM-internal fix to comply with the CRAN policy of only using up to 2
# threads in tests and examples.
#
# per https://cran.r-project.org/web/packages/policies.html
#
# > If running a package uses multiple threads/cores it must never use more than two simultaneously:
# > the check farm is a shared resource and will typically be running many checks simultaneously.
#
# Maximum number of threads the test suite may ask LightGBM to use; passed as
# `num_threads` in params throughout the tests (testthat sources this setup
# file before running them).
.LGB_MAX_THREADS <- 2L
...@@ -14,6 +14,7 @@ test_that("Predictor$finalize() should not fail", { ...@@ -14,6 +14,7 @@ test_that("Predictor$finalize() should not fail", {
data = dtrain data = dtrain
, params = list( , params = list(
objective = "regression" objective = "regression"
, num_threads = .LGB_MAX_THREADS
) )
, verbose = VERBOSITY , verbose = VERBOSITY
, nrounds = 3L , nrounds = 3L
...@@ -42,6 +43,7 @@ test_that("predictions do not fail for integer input", { ...@@ -42,6 +43,7 @@ test_that("predictions do not fail for integer input", {
data = dtrain data = dtrain
, params = list( , params = list(
objective = "regression" objective = "regression"
, num_threads = .LGB_MAX_THREADS
) )
, verbose = VERBOSITY , verbose = VERBOSITY
, nrounds = 3L , nrounds = 3L
...@@ -77,6 +79,7 @@ test_that("start_iteration works correctly", { ...@@ -77,6 +79,7 @@ test_that("start_iteration works correctly", {
, learning_rate = 0.6 , learning_rate = 0.6
, objective = "binary" , objective = "binary"
, verbosity = VERBOSITY , verbosity = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, nrounds = 50L , nrounds = 50L
, valids = list("test" = dtest) , valids = list("test" = dtest)
...@@ -126,7 +129,7 @@ test_that("Feature contributions from sparse inputs produce sparse outputs", { ...@@ -126,7 +129,7 @@ test_that("Feature contributions from sparse inputs produce sparse outputs", {
, obj = "regression" , obj = "regression"
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = VERBOSITY
, params = list(min_data_in_leaf = 5L) , params = list(min_data_in_leaf = 5L, num_threads = .LGB_MAX_THREADS)
) )
pred_dense <- predict(bst, X, type = "contrib") pred_dense <- predict(bst, X, type = "contrib")
...@@ -157,7 +160,7 @@ test_that("Sparse feature contribution predictions do not take inputs with wrong ...@@ -157,7 +160,7 @@ test_that("Sparse feature contribution predictions do not take inputs with wrong
, obj = "regression" , obj = "regression"
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = VERBOSITY
, params = list(min_data_in_leaf = 5L) , params = list(min_data_in_leaf = 5L, num_threads = .LGB_MAX_THREADS)
) )
X_wrong <- X[, c(1L:10L, 1L:10L)] X_wrong <- X[, c(1L:10L, 1L:10L)]
...@@ -187,7 +190,7 @@ test_that("Feature contribution predictions do not take non-general CSR or CSC i ...@@ -187,7 +190,7 @@ test_that("Feature contribution predictions do not take non-general CSR or CSC i
, obj = "regression" , obj = "regression"
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = VERBOSITY
, params = list(min_data_in_leaf = 5L) , params = list(min_data_in_leaf = 5L, num_threads = .LGB_MAX_THREADS)
) )
expect_error(predict(bst, SmatC, type = "contrib")) expect_error(predict(bst, SmatC, type = "contrib"))
...@@ -211,6 +214,7 @@ test_that("predict() params should override keyword argument for raw-score predi ...@@ -211,6 +214,7 @@ test_that("predict() params should override keyword argument for raw-score predi
objective = "binary" objective = "binary"
, min_data_in_leaf = 1L , min_data_in_leaf = 1L
, seed = 708L , seed = 708L
, num_threads = .LGB_MAX_THREADS
) )
, nrounds = 10L , nrounds = 10L
, verbose = VERBOSITY , verbose = VERBOSITY
...@@ -262,6 +266,7 @@ test_that("predict() params should override keyword argument for leaf-index pred ...@@ -262,6 +266,7 @@ test_that("predict() params should override keyword argument for leaf-index pred
objective = "regression" objective = "regression"
, min_data_in_leaf = 1L , min_data_in_leaf = 1L
, seed = 708L , seed = 708L
, num_threads = .LGB_MAX_THREADS
) )
, nrounds = 10L , nrounds = 10L
, verbose = VERBOSITY , verbose = VERBOSITY
...@@ -315,6 +320,7 @@ test_that("predict() params should override keyword argument for feature contrib ...@@ -315,6 +320,7 @@ test_that("predict() params should override keyword argument for feature contrib
objective = "regression" objective = "regression"
, min_data_in_leaf = 1L , min_data_in_leaf = 1L
, seed = 708L , seed = 708L
, num_threads = .LGB_MAX_THREADS
) )
, nrounds = 10L , nrounds = 10L
, verbose = VERBOSITY , verbose = VERBOSITY
...@@ -426,7 +432,7 @@ test_that("predict() keeps row names from data (regression)", { ...@@ -426,7 +432,7 @@ test_that("predict() keeps row names from data (regression)", {
, obj = "regression" , obj = "regression"
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = VERBOSITY
, params = list(min_data_in_leaf = 1L) , params = list(min_data_in_leaf = 1L, num_threads = .LGB_MAX_THREADS)
) )
.check_all_row_name_expectations(bst, X) .check_all_row_name_expectations(bst, X)
}) })
...@@ -442,6 +448,7 @@ test_that("predict() keeps row names from data (binary classification)", { ...@@ -442,6 +448,7 @@ test_that("predict() keeps row names from data (binary classification)", {
, obj = "binary" , obj = "binary"
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = VERBOSITY
, params = list(num_threads = .LGB_MAX_THREADS)
) )
.check_all_row_name_expectations(bst, X) .check_all_row_name_expectations(bst, X)
}) })
...@@ -455,7 +462,7 @@ test_that("predict() keeps row names from data (multi-class classification)", { ...@@ -455,7 +462,7 @@ test_that("predict() keeps row names from data (multi-class classification)", {
bst <- lgb.train( bst <- lgb.train(
data = dtrain data = dtrain
, obj = "multiclass" , obj = "multiclass"
, params = list(num_class = 3L) , params = list(num_class = 3L, num_threads = .LGB_MAX_THREADS)
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = VERBOSITY
) )
...@@ -479,7 +486,7 @@ test_that("predictions for regression and binary classification are returned as ...@@ -479,7 +486,7 @@ test_that("predictions for regression and binary classification are returned as
, obj = "regression" , obj = "regression"
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = VERBOSITY
, params = list(min_data_in_leaf = 1L) , params = list(min_data_in_leaf = 1L, num_threads = .LGB_MAX_THREADS)
) )
pred <- predict(model, X) pred <- predict(model, X)
expect_true(is.vector(pred)) expect_true(is.vector(pred))
...@@ -497,6 +504,7 @@ test_that("predictions for regression and binary classification are returned as ...@@ -497,6 +504,7 @@ test_that("predictions for regression and binary classification are returned as
, obj = "binary" , obj = "binary"
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = VERBOSITY
, params = list(num_threads = .LGB_MAX_THREADS)
) )
pred <- predict(model, X) pred <- predict(model, X)
expect_true(is.vector(pred)) expect_true(is.vector(pred))
...@@ -516,7 +524,7 @@ test_that("predictions for multiclass classification are returned as matrix", { ...@@ -516,7 +524,7 @@ test_that("predictions for multiclass classification are returned as matrix", {
, obj = "multiclass" , obj = "multiclass"
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = VERBOSITY
, params = list(num_class = 3L) , params = list(num_class = 3L, num_threads = .LGB_MAX_THREADS)
) )
pred <- predict(model, X) pred <- predict(model, X)
expect_true(is.matrix(pred)) expect_true(is.matrix(pred))
...@@ -533,7 +541,7 @@ test_that("Single-row predictions are identical to multi-row ones", { ...@@ -533,7 +541,7 @@ test_that("Single-row predictions are identical to multi-row ones", {
X <- as.matrix(mtcars[, -1L]) X <- as.matrix(mtcars[, -1L])
y <- mtcars[, 1L] y <- mtcars[, 1L]
dtrain <- lgb.Dataset(X, label = y, params = list(max_bin = 5L)) dtrain <- lgb.Dataset(X, label = y, params = list(max_bin = 5L))
params <- list(min_data_in_leaf = 2L) params <- list(min_data_in_leaf = 2L, num_threads = .LGB_MAX_THREADS)
model <- lgb.train( model <- lgb.train(
params = params params = params
, data = dtrain , data = dtrain
...@@ -594,7 +602,7 @@ test_that("Fast-predict configuration accepts non-default prediction types", { ...@@ -594,7 +602,7 @@ test_that("Fast-predict configuration accepts non-default prediction types", {
X <- as.matrix(mtcars[, -1L]) X <- as.matrix(mtcars[, -1L])
y <- mtcars[, 1L] y <- mtcars[, 1L]
dtrain <- lgb.Dataset(X, label = y, params = list(max_bin = 5L)) dtrain <- lgb.Dataset(X, label = y, params = list(max_bin = 5L))
params <- list(min_data_in_leaf = 2L) params <- list(min_data_in_leaf = 2L, num_threads = .LGB_MAX_THREADS)
model <- lgb.train( model <- lgb.train(
params = params params = params
, data = dtrain , data = dtrain
...@@ -624,7 +632,7 @@ test_that("Fast-predict configuration does not block other prediction types", { ...@@ -624,7 +632,7 @@ test_that("Fast-predict configuration does not block other prediction types", {
X <- as.matrix(mtcars[, -1L]) X <- as.matrix(mtcars[, -1L])
y <- mtcars[, 1L] y <- mtcars[, 1L]
dtrain <- lgb.Dataset(X, label = y, params = list(max_bin = 5L)) dtrain <- lgb.Dataset(X, label = y, params = list(max_bin = 5L))
params <- list(min_data_in_leaf = 2L) params <- list(min_data_in_leaf = 2L, num_threads = .LGB_MAX_THREADS)
model <- lgb.train( model <- lgb.train(
params = params params = params
, data = dtrain , data = dtrain
...@@ -661,6 +669,7 @@ test_that("predict type='class' returns predicted class for classification objec ...@@ -661,6 +669,7 @@ test_that("predict type='class' returns predicted class for classification objec
, obj = "binary" , obj = "binary"
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = VERBOSITY
, params = list(num_threads = .LGB_MAX_THREADS)
) )
pred <- predict(bst, X, type = "class") pred <- predict(bst, X, type = "class")
expect_true(all(pred %in% c(0L, 1L))) expect_true(all(pred %in% c(0L, 1L)))
...@@ -674,7 +683,7 @@ test_that("predict type='class' returns predicted class for classification objec ...@@ -674,7 +683,7 @@ test_that("predict type='class' returns predicted class for classification objec
, obj = "multiclass" , obj = "multiclass"
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = VERBOSITY
, params = list(num_class = 3L) , params = list(num_class = 3L, num_threads = .LGB_MAX_THREADS)
) )
pred <- predict(model, X, type = "class") pred <- predict(model, X, type = "class")
expect_true(all(pred %in% c(0L, 1L, 2L))) expect_true(all(pred %in% c(0L, 1L, 2L)))
...@@ -690,6 +699,7 @@ test_that("predict type='class' returns values in the target's range for regress ...@@ -690,6 +699,7 @@ test_that("predict type='class' returns values in the target's range for regress
, obj = "regression" , obj = "regression"
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = VERBOSITY
, params = list(num_threads = .LGB_MAX_THREADS)
) )
pred <- predict(bst, X, type = "class") pred <- predict(bst, X, type = "class")
expect_true(!any(pred %in% c(0.0, 1.0))) expect_true(!any(pred %in% c(0.0, 1.0)))
......
...@@ -55,18 +55,22 @@ CONSTANT_METRIC_VALUE <- 0.2 ...@@ -55,18 +55,22 @@ CONSTANT_METRIC_VALUE <- 0.2
DTRAIN_RANDOM_REGRESSION <- lgb.Dataset( DTRAIN_RANDOM_REGRESSION <- lgb.Dataset(
data = as.matrix(rnorm(100L), ncol = 1L, drop = FALSE) data = as.matrix(rnorm(100L), ncol = 1L, drop = FALSE)
, label = rnorm(100L) , label = rnorm(100L)
, params = list(num_threads = .LGB_MAX_THREADS)
) )
DVALID_RANDOM_REGRESSION <- lgb.Dataset( DVALID_RANDOM_REGRESSION <- lgb.Dataset(
data = as.matrix(rnorm(50L), ncol = 1L, drop = FALSE) data = as.matrix(rnorm(50L), ncol = 1L, drop = FALSE)
, label = rnorm(50L) , label = rnorm(50L)
, params = list(num_threads = .LGB_MAX_THREADS)
) )
DTRAIN_RANDOM_CLASSIFICATION <- lgb.Dataset( DTRAIN_RANDOM_CLASSIFICATION <- lgb.Dataset(
data = as.matrix(rnorm(120L), ncol = 1L, drop = FALSE) data = as.matrix(rnorm(120L), ncol = 1L, drop = FALSE)
, label = sample(c(0L, 1L), size = 120L, replace = TRUE) , label = sample(c(0L, 1L), size = 120L, replace = TRUE)
, params = list(num_threads = .LGB_MAX_THREADS)
) )
DVALID_RANDOM_CLASSIFICATION <- lgb.Dataset( DVALID_RANDOM_CLASSIFICATION <- lgb.Dataset(
data = as.matrix(rnorm(37L), ncol = 1L, drop = FALSE) data = as.matrix(rnorm(37L), ncol = 1L, drop = FALSE)
, label = sample(c(0L, 1L), size = 37L, replace = TRUE) , label = sample(c(0L, 1L), size = 37L, replace = TRUE)
, params = list(num_threads = .LGB_MAX_THREADS)
) )
test_that("train and predict binary classification", { test_that("train and predict binary classification", {
...@@ -79,6 +83,7 @@ test_that("train and predict binary classification", { ...@@ -79,6 +83,7 @@ test_that("train and predict binary classification", {
, objective = "binary" , objective = "binary"
, metric = "binary_error" , metric = "binary_error"
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, nrounds = nrounds , nrounds = nrounds
, valids = list( , valids = list(
...@@ -120,6 +125,7 @@ test_that("train and predict softmax", { ...@@ -120,6 +125,7 @@ test_that("train and predict softmax", {
, metric = "multi_error" , metric = "multi_error"
, num_class = 3L , num_class = 3L
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, nrounds = 20L , nrounds = 20L
, valids = list( , valids = list(
...@@ -150,12 +156,14 @@ test_that("use of multiple eval metrics works", { ...@@ -150,12 +156,14 @@ test_that("use of multiple eval metrics works", {
, objective = "binary" , objective = "binary"
, metric = metrics , metric = metrics
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, nrounds = 10L , nrounds = 10L
, valids = list( , valids = list(
"train" = lgb.Dataset( "train" = lgb.Dataset(
data = train$data data = train$data
, label = train$label , label = train$label
, params = list(num_threads = .LGB_MAX_THREADS)
) )
) )
) )
...@@ -179,6 +187,7 @@ test_that("lgb.Booster.upper_bound() and lgb.Booster.lower_bound() work as expec ...@@ -179,6 +187,7 @@ test_that("lgb.Booster.upper_bound() and lgb.Booster.lower_bound() work as expec
, objective = "binary" , objective = "binary"
, metric = "binary_error" , metric = "binary_error"
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, nrounds = nrounds , nrounds = nrounds
) )
...@@ -197,6 +206,7 @@ test_that("lgb.Booster.upper_bound() and lgb.Booster.lower_bound() work as expec ...@@ -197,6 +206,7 @@ test_that("lgb.Booster.upper_bound() and lgb.Booster.lower_bound() work as expec
, objective = "regression" , objective = "regression"
, metric = "l2" , metric = "l2"
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, nrounds = nrounds , nrounds = nrounds
) )
...@@ -206,7 +216,7 @@ test_that("lgb.Booster.upper_bound() and lgb.Booster.lower_bound() work as expec ...@@ -206,7 +216,7 @@ test_that("lgb.Booster.upper_bound() and lgb.Booster.lower_bound() work as expec
test_that("lightgbm() rejects negative or 0 value passed to nrounds", { test_that("lightgbm() rejects negative or 0 value passed to nrounds", {
dtrain <- lgb.Dataset(train$data, label = train$label) dtrain <- lgb.Dataset(train$data, label = train$label)
params <- list(objective = "regression", metric = "l2,l1") params <- list(objective = "regression", metric = "l2,l1", num_threads = .LGB_MAX_THREADS)
for (nround_value in c(-10L, 0L)) { for (nround_value in c(-10L, 0L)) {
expect_error({ expect_error({
bst <- lightgbm( bst <- lightgbm(
...@@ -231,6 +241,7 @@ test_that("lightgbm() accepts nrounds as either a top-level argument or paramete ...@@ -231,6 +241,7 @@ test_that("lightgbm() accepts nrounds as either a top-level argument or paramete
, metric = "l2" , metric = "l2"
, num_leaves = 5L , num_leaves = 5L
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
) )
...@@ -244,6 +255,7 @@ test_that("lightgbm() accepts nrounds as either a top-level argument or paramete ...@@ -244,6 +255,7 @@ test_that("lightgbm() accepts nrounds as either a top-level argument or paramete
, num_leaves = 5L , num_leaves = 5L
, nrounds = nrounds , nrounds = nrounds
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
) )
...@@ -258,6 +270,7 @@ test_that("lightgbm() accepts nrounds as either a top-level argument or paramete ...@@ -258,6 +270,7 @@ test_that("lightgbm() accepts nrounds as either a top-level argument or paramete
, num_leaves = 5L , num_leaves = 5L
, nrounds = nrounds , nrounds = nrounds
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
) )
...@@ -285,10 +298,12 @@ test_that("lightgbm() performs evaluation on validation sets if they are provide ...@@ -285,10 +298,12 @@ test_that("lightgbm() performs evaluation on validation sets if they are provide
dvalid1 <- lgb.Dataset( dvalid1 <- lgb.Dataset(
data = train$data data = train$data
, label = train$label , label = train$label
, params = list(num_threads = .LGB_MAX_THREADS)
) )
dvalid2 <- lgb.Dataset( dvalid2 <- lgb.Dataset(
data = train$data data = train$data
, label = train$label , label = train$label
, params = list(num_threads = .LGB_MAX_THREADS)
) )
nrounds <- 10L nrounds <- 10L
bst <- lightgbm( bst <- lightgbm(
...@@ -302,6 +317,7 @@ test_that("lightgbm() performs evaluation on validation sets if they are provide ...@@ -302,6 +317,7 @@ test_that("lightgbm() performs evaluation on validation sets if they are provide
, "auc" , "auc"
) )
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, nrounds = nrounds , nrounds = nrounds
, valids = list( , valids = list(
...@@ -310,6 +326,7 @@ test_that("lightgbm() performs evaluation on validation sets if they are provide ...@@ -310,6 +326,7 @@ test_that("lightgbm() performs evaluation on validation sets if they are provide
, "train" = lgb.Dataset( , "train" = lgb.Dataset(
data = train$data data = train$data
, label = train$label , label = train$label
, params = list(num_threads = .LGB_MAX_THREADS)
) )
) )
) )
...@@ -334,6 +351,7 @@ test_that("training continuation works", { ...@@ -334,6 +351,7 @@ test_that("training continuation works", {
train$data train$data
, label = train$label , label = train$label
, free_raw_data = FALSE , free_raw_data = FALSE
, params = list(num_threads = .LGB_MAX_THREADS)
) )
watchlist <- list(train = dtrain) watchlist <- list(train = dtrain)
param <- list( param <- list(
...@@ -342,6 +360,7 @@ test_that("training continuation works", { ...@@ -342,6 +360,7 @@ test_that("training continuation works", {
, num_leaves = 5L , num_leaves = 5L
, learning_rate = 1.0 , learning_rate = 1.0
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
# train for 10 consecutive iterations # train for 10 consecutive iterations
...@@ -368,6 +387,7 @@ test_that("cv works", { ...@@ -368,6 +387,7 @@ test_that("cv works", {
, min_data = 1L , min_data = 1L
, learning_rate = 1.0 , learning_rate = 1.0
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
bst <- lgb.cv( bst <- lgb.cv(
params params
...@@ -388,6 +408,7 @@ test_that("CVBooster$reset_parameter() works as expected", { ...@@ -388,6 +408,7 @@ test_that("CVBooster$reset_parameter() works as expected", {
, min_data = 1L , min_data = 1L
, num_leaves = 7L , num_leaves = 7L
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, data = dtrain , data = dtrain
, nrounds = 3L , nrounds = 3L
...@@ -405,11 +426,12 @@ test_that("CVBooster$reset_parameter() works as expected", { ...@@ -405,11 +426,12 @@ test_that("CVBooster$reset_parameter() works as expected", {
}) })
test_that("lgb.cv() rejects negative or 0 value passed to nrounds", { test_that("lgb.cv() rejects negative or 0 value passed to nrounds", {
dtrain <- lgb.Dataset(train$data, label = train$label) dtrain <- lgb.Dataset(train$data, label = train$label, params = list(num_threads = 2L))
params <- list( params <- list(
objective = "regression" objective = "regression"
, metric = "l2,l1" , metric = "l2,l1"
, min_data = 1L , min_data = 1L
, num_threads = .LGB_MAX_THREADS
) )
for (nround_value in c(-10L, 0L)) { for (nround_value in c(-10L, 0L)) {
expect_error({ expect_error({
...@@ -453,6 +475,7 @@ test_that("lightgbm.cv() gives the correct best_score and best_iter for a metric ...@@ -453,6 +475,7 @@ test_that("lightgbm.cv() gives the correct best_score and best_iter for a metric
dtrain <- lgb.Dataset( dtrain <- lgb.Dataset(
data = as.matrix(runif(n = 500L, min = 0.0, max = 15.0), drop = FALSE) data = as.matrix(runif(n = 500L, min = 0.0, max = 15.0), drop = FALSE)
, label = rep(c(0L, 1L), 250L) , label = rep(c(0L, 1L), 250L)
, params = list(num_threads = .LGB_MAX_THREADS)
) )
nrounds <- 10L nrounds <- 10L
cv_bst <- lgb.cv( cv_bst <- lgb.cv(
...@@ -465,6 +488,7 @@ test_that("lightgbm.cv() gives the correct best_score and best_iter for a metric ...@@ -465,6 +488,7 @@ test_that("lightgbm.cv() gives the correct best_score and best_iter for a metric
, learning_rate = 1.5 , learning_rate = 1.5
, num_leaves = 5L , num_leaves = 5L
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
) )
expect_true(methods::is(cv_bst, "lgb.CVBooster")) expect_true(methods::is(cv_bst, "lgb.CVBooster"))
...@@ -487,6 +511,7 @@ test_that("lgb.cv() fit on linearly-relatead data improves when using linear lea ...@@ -487,6 +511,7 @@ test_that("lgb.cv() fit on linearly-relatead data improves when using linear lea
return(lgb.Dataset( return(lgb.Dataset(
data = X data = X
, label = 2L * X + runif(nrow(X), 0L, 0.1) , label = 2L * X + runif(nrow(X), 0L, 0.1)
, params = list(num_threads = .LGB_MAX_THREADS)
)) ))
} }
...@@ -496,6 +521,7 @@ test_that("lgb.cv() fit on linearly-relatead data improves when using linear lea ...@@ -496,6 +521,7 @@ test_that("lgb.cv() fit on linearly-relatead data improves when using linear lea
, metric = "mse" , metric = "mse"
, seed = 0L , seed = 0L
, num_leaves = 2L , num_leaves = 2L
, num_threads = .LGB_MAX_THREADS
) )
dtrain <- .new_dataset() dtrain <- .new_dataset()
...@@ -520,12 +546,13 @@ test_that("lgb.cv() fit on linearly-relatead data improves when using linear lea ...@@ -520,12 +546,13 @@ test_that("lgb.cv() fit on linearly-relatead data improves when using linear lea
}) })
test_that("lgb.cv() respects showsd argument", { test_that("lgb.cv() respects showsd argument", {
dtrain <- lgb.Dataset(train$data, label = train$label) dtrain <- lgb.Dataset(train$data, label = train$label, params = list(num_threads = .LGB_MAX_THREADS))
params <- list( params <- list(
objective = "regression" objective = "regression"
, metric = "l2" , metric = "l2"
, min_data = 1L , min_data = 1L
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
nrounds <- 5L nrounds <- 5L
set.seed(708L) set.seed(708L)
...@@ -559,6 +586,7 @@ test_that("lgb.cv() raises an informative error for unrecognized objectives", { ...@@ -559,6 +586,7 @@ test_that("lgb.cv() raises an informative error for unrecognized objectives", {
dtrain <- lgb.Dataset( dtrain <- lgb.Dataset(
data = train$data data = train$data
, label = train$label , label = train$label
, params = list(num_threads = .LGB_MAX_THREADS)
) )
expect_error({ expect_error({
capture.output({ capture.output({
...@@ -567,6 +595,7 @@ test_that("lgb.cv() raises an informative error for unrecognized objectives", { ...@@ -567,6 +595,7 @@ test_that("lgb.cv() raises an informative error for unrecognized objectives", {
, params = list( , params = list(
objective_type = "not_a_real_objective" objective_type = "not_a_real_objective"
, verbosity = VERBOSITY , verbosity = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
) )
}, type = "message") }, type = "message")
...@@ -579,6 +608,7 @@ test_that("lgb.cv() respects parameter aliases for objective", { ...@@ -579,6 +608,7 @@ test_that("lgb.cv() respects parameter aliases for objective", {
dtrain <- lgb.Dataset( dtrain <- lgb.Dataset(
data = train$data data = train$data
, label = train$label , label = train$label
, params = list(num_threads = .LGB_MAX_THREADS)
) )
cv_bst <- lgb.cv( cv_bst <- lgb.cv(
data = dtrain data = dtrain
...@@ -587,6 +617,7 @@ test_that("lgb.cv() respects parameter aliases for objective", { ...@@ -587,6 +617,7 @@ test_that("lgb.cv() respects parameter aliases for objective", {
, application = "binary" , application = "binary"
, num_iterations = nrounds , num_iterations = nrounds
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, nfold = nfold , nfold = nfold
) )
...@@ -602,10 +633,12 @@ test_that("lgb.cv() prefers objective in params to keyword argument", { ...@@ -602,10 +633,12 @@ test_that("lgb.cv() prefers objective in params to keyword argument", {
data = lgb.Dataset( data = lgb.Dataset(
data = EuStockMarkets[, c("SMI", "CAC", "FTSE")] data = EuStockMarkets[, c("SMI", "CAC", "FTSE")]
, label = EuStockMarkets[, "DAX"] , label = EuStockMarkets[, "DAX"]
, params = list(num_threads = .LGB_MAX_THREADS)
) )
, params = list( , params = list(
application = "regression_l1" application = "regression_l1"
, verbosity = VERBOSITY , verbosity = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, nrounds = 5L , nrounds = 5L
, obj = "regression_l2" , obj = "regression_l2"
...@@ -631,6 +664,7 @@ test_that("lgb.cv() respects parameter aliases for metric", { ...@@ -631,6 +664,7 @@ test_that("lgb.cv() respects parameter aliases for metric", {
dtrain <- lgb.Dataset( dtrain <- lgb.Dataset(
data = train$data data = train$data
, label = train$label , label = train$label
, params = list(num_threads = .LGB_MAX_THREADS)
) )
cv_bst <- lgb.cv( cv_bst <- lgb.cv(
data = dtrain data = dtrain
...@@ -640,6 +674,7 @@ test_that("lgb.cv() respects parameter aliases for metric", { ...@@ -640,6 +674,7 @@ test_that("lgb.cv() respects parameter aliases for metric", {
, num_iterations = nrounds , num_iterations = nrounds
, metric_types = c("auc", "binary_logloss") , metric_types = c("auc", "binary_logloss")
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, nfold = nfold , nfold = nfold
) )
...@@ -657,6 +692,7 @@ test_that("lgb.cv() respects eval_train_metric argument", { ...@@ -657,6 +692,7 @@ test_that("lgb.cv() respects eval_train_metric argument", {
, metric = "l2" , metric = "l2"
, min_data = 1L , min_data = 1L
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
nrounds <- 5L nrounds <- 5L
set.seed(708L) set.seed(708L)
...@@ -696,6 +732,7 @@ test_that("lgb.train() works as expected with multiple eval metrics", { ...@@ -696,6 +732,7 @@ test_that("lgb.train() works as expected with multiple eval metrics", {
data = lgb.Dataset( data = lgb.Dataset(
train$data train$data
, label = train$label , label = train$label
, params = list(num_threads = .LGB_MAX_THREADS)
) )
, nrounds = 10L , nrounds = 10L
, params = list( , params = list(
...@@ -703,11 +740,13 @@ test_that("lgb.train() works as expected with multiple eval metrics", { ...@@ -703,11 +740,13 @@ test_that("lgb.train() works as expected with multiple eval metrics", {
, metric = metrics , metric = metrics
, learning_rate = 1.0 , learning_rate = 1.0
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, valids = list( , valids = list(
"train" = lgb.Dataset( "train" = lgb.Dataset(
train$data train$data
, label = train$label , label = train$label
, params = list(num_threads = .LGB_MAX_THREADS)
) )
) )
) )
...@@ -743,6 +782,7 @@ test_that("lgb.train() respects parameter aliases for objective", { ...@@ -743,6 +782,7 @@ test_that("lgb.train() respects parameter aliases for objective", {
dtrain <- lgb.Dataset( dtrain <- lgb.Dataset(
data = train$data data = train$data
, label = train$label , label = train$label
, params = list(num_threads = .LGB_MAX_THREADS)
) )
bst <- lgb.train( bst <- lgb.train(
data = dtrain data = dtrain
...@@ -751,6 +791,7 @@ test_that("lgb.train() respects parameter aliases for objective", { ...@@ -751,6 +791,7 @@ test_that("lgb.train() respects parameter aliases for objective", {
, application = "binary" , application = "binary"
, num_iterations = nrounds , num_iterations = nrounds
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, valids = list( , valids = list(
"the_training_data" = dtrain "the_training_data" = dtrain
...@@ -767,10 +808,12 @@ test_that("lgb.train() prefers objective in params to keyword argument", { ...@@ -767,10 +808,12 @@ test_that("lgb.train() prefers objective in params to keyword argument", {
data = lgb.Dataset( data = lgb.Dataset(
data = EuStockMarkets[, c("SMI", "CAC", "FTSE")] data = EuStockMarkets[, c("SMI", "CAC", "FTSE")]
, label = EuStockMarkets[, "DAX"] , label = EuStockMarkets[, "DAX"]
, params = list(num_threads = .LGB_MAX_THREADS)
) )
, params = list( , params = list(
loss = "regression_l1" loss = "regression_l1"
, verbosity = VERBOSITY , verbosity = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, nrounds = 5L , nrounds = 5L
, obj = "regression_l2" , obj = "regression_l2"
...@@ -792,6 +835,7 @@ test_that("lgb.train() respects parameter aliases for metric", { ...@@ -792,6 +835,7 @@ test_that("lgb.train() respects parameter aliases for metric", {
dtrain <- lgb.Dataset( dtrain <- lgb.Dataset(
data = train$data data = train$data
, label = train$label , label = train$label
, params = list(num_threads = .LGB_MAX_THREADS)
) )
bst <- lgb.train( bst <- lgb.train(
data = dtrain data = dtrain
...@@ -801,6 +845,7 @@ test_that("lgb.train() respects parameter aliases for metric", { ...@@ -801,6 +845,7 @@ test_that("lgb.train() respects parameter aliases for metric", {
, num_iterations = nrounds , num_iterations = nrounds
, metric_types = c("auc", "binary_logloss") , metric_types = c("auc", "binary_logloss")
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, valids = list( , valids = list(
"train" = dtrain "train" = dtrain
...@@ -814,11 +859,12 @@ test_that("lgb.train() respects parameter aliases for metric", { ...@@ -814,11 +859,12 @@ test_that("lgb.train() respects parameter aliases for metric", {
}) })
test_that("lgb.train() rejects negative or 0 value passed to nrounds", { test_that("lgb.train() rejects negative or 0 value passed to nrounds", {
dtrain <- lgb.Dataset(train$data, label = train$label) dtrain <- lgb.Dataset(train$data, label = train$label, params = list(num_threads = .LGB_MAX_THREADS))
params <- list( params <- list(
objective = "regression" objective = "regression"
, metric = "l2,l1" , metric = "l2,l1"
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
for (nround_value in c(-10L, 0L)) { for (nround_value in c(-10L, 0L)) {
expect_error({ expect_error({
...@@ -840,6 +886,7 @@ test_that("lgb.train() accepts nrounds as either a top-level argument or paramet ...@@ -840,6 +886,7 @@ test_that("lgb.train() accepts nrounds as either a top-level argument or paramet
data = lgb.Dataset( data = lgb.Dataset(
train$data train$data
, label = train$label , label = train$label
, params = list(num_threads = .LGB_MAX_THREADS)
) )
, nrounds = nrounds , nrounds = nrounds
, params = list( , params = list(
...@@ -847,6 +894,7 @@ test_that("lgb.train() accepts nrounds as either a top-level argument or paramet ...@@ -847,6 +894,7 @@ test_that("lgb.train() accepts nrounds as either a top-level argument or paramet
, metric = "l2" , metric = "l2"
, num_leaves = 5L , num_leaves = 5L
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
) )
...@@ -855,6 +903,7 @@ test_that("lgb.train() accepts nrounds as either a top-level argument or paramet ...@@ -855,6 +903,7 @@ test_that("lgb.train() accepts nrounds as either a top-level argument or paramet
data = lgb.Dataset( data = lgb.Dataset(
train$data train$data
, label = train$label , label = train$label
, params = list(num_threads = .LGB_MAX_THREADS)
) )
, params = list( , params = list(
objective = "regression" objective = "regression"
...@@ -870,6 +919,7 @@ test_that("lgb.train() accepts nrounds as either a top-level argument or paramet ...@@ -870,6 +919,7 @@ test_that("lgb.train() accepts nrounds as either a top-level argument or paramet
data = lgb.Dataset( data = lgb.Dataset(
train$data train$data
, label = train$label , label = train$label
, params = list(num_threads = .LGB_MAX_THREADS)
) )
, nrounds = 20L , nrounds = 20L
, params = list( , params = list(
...@@ -878,6 +928,7 @@ test_that("lgb.train() accepts nrounds as either a top-level argument or paramet ...@@ -878,6 +928,7 @@ test_that("lgb.train() accepts nrounds as either a top-level argument or paramet
, num_leaves = 5L , num_leaves = 5L
, nrounds = nrounds , nrounds = nrounds
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
) )
...@@ -988,12 +1039,14 @@ test_that("lgb.train() works with force_col_wise and force_row_wise", { ...@@ -988,12 +1039,14 @@ test_that("lgb.train() works with force_col_wise and force_row_wise", {
dtrain <- lgb.Dataset( dtrain <- lgb.Dataset(
train$data train$data
, label = train$label , label = train$label
, params = list(num_threads = .LGB_MAX_THREADS)
) )
params <- list( params <- list(
objective = "binary" objective = "binary"
, metric = "binary_error" , metric = "binary_error"
, force_col_wise = TRUE , force_col_wise = TRUE
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
bst_col_wise <- lgb.train( bst_col_wise <- lgb.train(
params = params params = params
...@@ -1006,6 +1059,7 @@ test_that("lgb.train() works with force_col_wise and force_row_wise", { ...@@ -1006,6 +1059,7 @@ test_that("lgb.train() works with force_col_wise and force_row_wise", {
, metric = "binary_error" , metric = "binary_error"
, force_row_wise = TRUE , force_row_wise = TRUE
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
bst_row_wise <- lgb.train( bst_row_wise <- lgb.train(
params = params params = params
...@@ -1037,6 +1091,7 @@ test_that("lgb.train() works as expected with sparse features", { ...@@ -1037,6 +1091,7 @@ test_that("lgb.train() works as expected with sparse features", {
dtrain <- lgb.Dataset( dtrain <- lgb.Dataset(
data = as.matrix(trainDF[["x"]], drop = FALSE) data = as.matrix(trainDF[["x"]], drop = FALSE)
, label = trainDF[["y"]] , label = trainDF[["y"]]
, params = list(num_threads = .LGB_MAX_THREADS)
) )
nrounds <- 1L nrounds <- 1L
bst <- lgb.train( bst <- lgb.train(
...@@ -1045,6 +1100,7 @@ test_that("lgb.train() works as expected with sparse features", { ...@@ -1045,6 +1100,7 @@ test_that("lgb.train() works as expected with sparse features", {
, min_data = 1L , min_data = 1L
, min_data_in_bin = 1L , min_data_in_bin = 1L
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, data = dtrain , data = dtrain
, nrounds = nrounds , nrounds = nrounds
...@@ -1071,10 +1127,12 @@ test_that("lgb.train() works with early stopping for classification", { ...@@ -1071,10 +1127,12 @@ test_that("lgb.train() works with early stopping for classification", {
dtrain <- lgb.Dataset( dtrain <- lgb.Dataset(
data = as.matrix(trainDF[["feat1"]], drop = FALSE) data = as.matrix(trainDF[["feat1"]], drop = FALSE)
, label = trainDF[["target"]] , label = trainDF[["target"]]
, params = list(num_threads = .LGB_MAX_THREADS)
) )
dvalid <- lgb.Dataset( dvalid <- lgb.Dataset(
data = as.matrix(validDF[["feat1"]], drop = FALSE) data = as.matrix(validDF[["feat1"]], drop = FALSE)
, label = validDF[["target"]] , label = validDF[["target"]]
, params = list(num_threads = .LGB_MAX_THREADS)
) )
nrounds <- 10L nrounds <- 10L
...@@ -1086,6 +1144,7 @@ test_that("lgb.train() works with early stopping for classification", { ...@@ -1086,6 +1144,7 @@ test_that("lgb.train() works with early stopping for classification", {
objective = "binary" objective = "binary"
, metric = "binary_error" , metric = "binary_error"
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, data = dtrain , data = dtrain
, nrounds = nrounds , nrounds = nrounds
...@@ -1110,6 +1169,7 @@ test_that("lgb.train() works with early stopping for classification", { ...@@ -1110,6 +1169,7 @@ test_that("lgb.train() works with early stopping for classification", {
, metric = "binary_error" , metric = "binary_error"
, early_stopping_rounds = early_stopping_rounds , early_stopping_rounds = early_stopping_rounds
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, data = dtrain , data = dtrain
, nrounds = nrounds , nrounds = nrounds
...@@ -1142,10 +1202,12 @@ test_that("lgb.train() treats early_stopping_rounds<=0 as disabling early stoppi ...@@ -1142,10 +1202,12 @@ test_that("lgb.train() treats early_stopping_rounds<=0 as disabling early stoppi
dtrain <- lgb.Dataset( dtrain <- lgb.Dataset(
data = as.matrix(trainDF[["feat1"]], drop = FALSE) data = as.matrix(trainDF[["feat1"]], drop = FALSE)
, label = trainDF[["target"]] , label = trainDF[["target"]]
, params = list(num_threads = .LGB_MAX_THREADS)
) )
dvalid <- lgb.Dataset( dvalid <- lgb.Dataset(
data = as.matrix(validDF[["feat1"]], drop = FALSE) data = as.matrix(validDF[["feat1"]], drop = FALSE)
, label = validDF[["target"]] , label = validDF[["target"]]
, params = list(num_threads = .LGB_MAX_THREADS)
) )
nrounds <- 5L nrounds <- 5L
...@@ -1159,6 +1221,7 @@ test_that("lgb.train() treats early_stopping_rounds<=0 as disabling early stoppi ...@@ -1159,6 +1221,7 @@ test_that("lgb.train() treats early_stopping_rounds<=0 as disabling early stoppi
objective = "binary" objective = "binary"
, metric = "binary_error" , metric = "binary_error"
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, data = dtrain , data = dtrain
, nrounds = nrounds , nrounds = nrounds
...@@ -1183,6 +1246,7 @@ test_that("lgb.train() treats early_stopping_rounds<=0 as disabling early stoppi ...@@ -1183,6 +1246,7 @@ test_that("lgb.train() treats early_stopping_rounds<=0 as disabling early stoppi
, metric = "binary_error" , metric = "binary_error"
, n_iter_no_change = value , n_iter_no_change = value
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, data = dtrain , data = dtrain
, nrounds = nrounds , nrounds = nrounds
...@@ -1204,10 +1268,12 @@ test_that("lgb.train() works with early stopping for classification with a metri ...@@ -1204,10 +1268,12 @@ test_that("lgb.train() works with early stopping for classification with a metri
dtrain <- lgb.Dataset( dtrain <- lgb.Dataset(
data = train$data data = train$data
, label = train$label , label = train$label
, params = list(num_threads = .LGB_MAX_THREADS)
) )
dvalid <- lgb.Dataset( dvalid <- lgb.Dataset(
data = test$data data = test$data
, label = test$label , label = test$label
, params = list(num_threads = .LGB_MAX_THREADS)
) )
nrounds <- 10L nrounds <- 10L
...@@ -1223,6 +1289,7 @@ test_that("lgb.train() works with early stopping for classification with a metri ...@@ -1223,6 +1289,7 @@ test_that("lgb.train() works with early stopping for classification with a metri
, max_depth = 3L , max_depth = 3L
, early_stopping_rounds = early_stopping_rounds , early_stopping_rounds = early_stopping_rounds
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, data = dtrain , data = dtrain
, nrounds = nrounds , nrounds = nrounds
...@@ -1237,6 +1304,7 @@ test_that("lgb.train() works with early stopping for classification with a metri ...@@ -1237,6 +1304,7 @@ test_that("lgb.train() works with early stopping for classification with a metri
, max_depth = 3L , max_depth = 3L
, early_stopping_rounds = early_stopping_rounds , early_stopping_rounds = early_stopping_rounds
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, data = dtrain , data = dtrain
, nrounds = nrounds , nrounds = nrounds
...@@ -1281,10 +1349,12 @@ test_that("lgb.train() works with early stopping for regression", { ...@@ -1281,10 +1349,12 @@ test_that("lgb.train() works with early stopping for regression", {
dtrain <- lgb.Dataset( dtrain <- lgb.Dataset(
data = as.matrix(trainDF[["feat1"]], drop = FALSE) data = as.matrix(trainDF[["feat1"]], drop = FALSE)
, label = trainDF[["target"]] , label = trainDF[["target"]]
, params = list(num_threads = .LGB_MAX_THREADS)
) )
dvalid <- lgb.Dataset( dvalid <- lgb.Dataset(
data = as.matrix(validDF[["feat1"]], drop = FALSE) data = as.matrix(validDF[["feat1"]], drop = FALSE)
, label = validDF[["target"]] , label = validDF[["target"]]
, params = list(num_threads = .LGB_MAX_THREADS)
) )
nrounds <- 10L nrounds <- 10L
...@@ -1296,6 +1366,7 @@ test_that("lgb.train() works with early stopping for regression", { ...@@ -1296,6 +1366,7 @@ test_that("lgb.train() works with early stopping for regression", {
objective = "regression" objective = "regression"
, metric = "rmse" , metric = "rmse"
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, data = dtrain , data = dtrain
, nrounds = nrounds , nrounds = nrounds
...@@ -1320,6 +1391,7 @@ test_that("lgb.train() works with early stopping for regression", { ...@@ -1320,6 +1391,7 @@ test_that("lgb.train() works with early stopping for regression", {
, metric = "rmse" , metric = "rmse"
, early_stopping_rounds = early_stopping_rounds , early_stopping_rounds = early_stopping_rounds
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, data = dtrain , data = dtrain
, nrounds = nrounds , nrounds = nrounds
...@@ -1355,6 +1427,7 @@ test_that("lgb.train() does not stop early if early_stopping_rounds is not given ...@@ -1355,6 +1427,7 @@ test_that("lgb.train() does not stop early if early_stopping_rounds is not given
objective = "regression" objective = "regression"
, metric = "None" , metric = "None"
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, data = DTRAIN_RANDOM_REGRESSION , data = DTRAIN_RANDOM_REGRESSION
, nrounds = nrounds , nrounds = nrounds
...@@ -1399,6 +1472,7 @@ test_that("If first_metric_only is not given or is FALSE, lgb.train() decides to ...@@ -1399,6 +1472,7 @@ test_that("If first_metric_only is not given or is FALSE, lgb.train() decides to
, metric = "None" , metric = "None"
, early_stopping_rounds = early_stopping_rounds , early_stopping_rounds = early_stopping_rounds
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, list( , list(
objective = "regression" objective = "regression"
...@@ -1406,6 +1480,7 @@ test_that("If first_metric_only is not given or is FALSE, lgb.train() decides to ...@@ -1406,6 +1480,7 @@ test_that("If first_metric_only is not given or is FALSE, lgb.train() decides to
, early_stopping_rounds = early_stopping_rounds , early_stopping_rounds = early_stopping_rounds
, first_metric_only = FALSE , first_metric_only = FALSE
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
) )
...@@ -1469,6 +1544,7 @@ test_that("If first_metric_only is TRUE, lgb.train() decides to stop early based ...@@ -1469,6 +1544,7 @@ test_that("If first_metric_only is TRUE, lgb.train() decides to stop early based
, early_stopping_rounds = early_stopping_rounds , early_stopping_rounds = early_stopping_rounds
, first_metric_only = TRUE , first_metric_only = TRUE
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, data = DTRAIN_RANDOM_REGRESSION , data = DTRAIN_RANDOM_REGRESSION
, nrounds = nrounds , nrounds = nrounds
...@@ -1515,6 +1591,7 @@ test_that("lgb.train() works when a mixture of functions and strings are passed ...@@ -1515,6 +1591,7 @@ test_that("lgb.train() works when a mixture of functions and strings are passed
objective = "regression" objective = "regression"
, metric = "None" , metric = "None"
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, data = DTRAIN_RANDOM_REGRESSION , data = DTRAIN_RANDOM_REGRESSION
, nrounds = nrounds , nrounds = nrounds
...@@ -1571,6 +1648,7 @@ test_that("lgb.train() works when a list of strings or a character vector is pas ...@@ -1571,6 +1648,7 @@ test_that("lgb.train() works when a list of strings or a character vector is pas
objective = "binary" objective = "binary"
, metric = "None" , metric = "None"
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, data = DTRAIN_RANDOM_CLASSIFICATION , data = DTRAIN_RANDOM_CLASSIFICATION
, nrounds = nrounds , nrounds = nrounds
...@@ -1608,6 +1686,7 @@ test_that("lgb.train() works when you specify both 'metric' and 'eval' with stri ...@@ -1608,6 +1686,7 @@ test_that("lgb.train() works when you specify both 'metric' and 'eval' with stri
objective = "binary" objective = "binary"
, metric = "binary_error" , metric = "binary_error"
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, data = DTRAIN_RANDOM_CLASSIFICATION , data = DTRAIN_RANDOM_CLASSIFICATION
, nrounds = nrounds , nrounds = nrounds
...@@ -1640,6 +1719,7 @@ test_that("lgb.train() works when you give a function for eval", { ...@@ -1640,6 +1719,7 @@ test_that("lgb.train() works when you give a function for eval", {
objective = "binary" objective = "binary"
, metric = "None" , metric = "None"
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, data = DTRAIN_RANDOM_CLASSIFICATION , data = DTRAIN_RANDOM_CLASSIFICATION
, nrounds = nrounds , nrounds = nrounds
...@@ -1667,10 +1747,12 @@ test_that("lgb.train() works with early stopping for regression with a metric th ...@@ -1667,10 +1747,12 @@ test_that("lgb.train() works with early stopping for regression with a metric th
dtrain <- lgb.Dataset( dtrain <- lgb.Dataset(
data = as.matrix(trainDF[["feat1"]], drop = FALSE) data = as.matrix(trainDF[["feat1"]], drop = FALSE)
, label = trainDF[["target"]] , label = trainDF[["target"]]
, params = list(num_threads = .LGB_MAX_THREADS)
) )
dvalid <- lgb.Dataset( dvalid <- lgb.Dataset(
data = as.matrix(validDF[["feat1"]], drop = FALSE) data = as.matrix(validDF[["feat1"]], drop = FALSE)
, label = validDF[["target"]] , label = validDF[["target"]]
, params = list(num_threads = .LGB_MAX_THREADS)
) )
nrounds <- 10L nrounds <- 10L
...@@ -1689,6 +1771,7 @@ test_that("lgb.train() works with early stopping for regression with a metric th ...@@ -1689,6 +1771,7 @@ test_that("lgb.train() works with early stopping for regression with a metric th
, min_data_in_bin = 5L , min_data_in_bin = 5L
, early_stopping_rounds = early_stopping_rounds , early_stopping_rounds = early_stopping_rounds
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, data = dtrain , data = dtrain
, nrounds = nrounds , nrounds = nrounds
...@@ -1720,6 +1803,7 @@ test_that("lgb.train() supports non-ASCII feature names", { ...@@ -1720,6 +1803,7 @@ test_that("lgb.train() supports non-ASCII feature names", {
dtrain <- lgb.Dataset( dtrain <- lgb.Dataset(
data = matrix(rnorm(400L), ncol = 4L) data = matrix(rnorm(400L), ncol = 4L)
, label = rnorm(100L) , label = rnorm(100L)
, params = list(num_threads = .LGB_MAX_THREADS)
) )
# content below is equivalent to # content below is equivalent to
# #
...@@ -1740,6 +1824,7 @@ test_that("lgb.train() supports non-ASCII feature names", { ...@@ -1740,6 +1824,7 @@ test_that("lgb.train() supports non-ASCII feature names", {
, params = list( , params = list(
metric = "rmse" metric = "rmse"
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, colnames = feature_names , colnames = feature_names
) )
...@@ -1800,6 +1885,7 @@ test_that("lgb.train() updates params based on keyword arguments", { ...@@ -1800,6 +1885,7 @@ test_that("lgb.train() updates params based on keyword arguments", {
dtrain <- lgb.Dataset( dtrain <- lgb.Dataset(
data = matrix(rnorm(400L), ncol = 4L) data = matrix(rnorm(400L), ncol = 4L)
, label = rnorm(100L) , label = rnorm(100L)
, params = list(num_threads = .LGB_MAX_THREADS)
) )
# defaults from keyword arguments should be used if not specified in params # defaults from keyword arguments should be used if not specified in params
...@@ -1808,7 +1894,7 @@ test_that("lgb.train() updates params based on keyword arguments", { ...@@ -1808,7 +1894,7 @@ test_that("lgb.train() updates params based on keyword arguments", {
bst <- lgb.train( bst <- lgb.train(
data = dtrain data = dtrain
, obj = "regression" , obj = "regression"
, params = list() , params = list(num_threads = .LGB_MAX_THREADS)
) )
}) })
) )
...@@ -1824,6 +1910,7 @@ test_that("lgb.train() updates params based on keyword arguments", { ...@@ -1824,6 +1910,7 @@ test_that("lgb.train() updates params based on keyword arguments", {
, params = list( , params = list(
"verbosity" = 5L "verbosity" = 5L
, "num_iterations" = 2L , "num_iterations" = 2L
, num_threads = .LGB_MAX_THREADS
) )
) )
}) })
...@@ -1840,6 +1927,7 @@ test_that("lgb.train() updates params based on keyword arguments", { ...@@ -1840,6 +1927,7 @@ test_that("lgb.train() updates params based on keyword arguments", {
, params = list( , params = list(
"verbose" = 5L "verbose" = 5L
, "num_boost_round" = 2L , "num_boost_round" = 2L
, num_threads = .LGB_MAX_THREADS
) )
) )
}) })
...@@ -1863,14 +1951,17 @@ test_that("when early stopping is not activated, best_iter and best_score come f ...@@ -1863,14 +1951,17 @@ test_that("when early stopping is not activated, best_iter and best_score come f
dtrain <- lgb.Dataset( dtrain <- lgb.Dataset(
data = as.matrix(trainDF[["feat1"]], drop = FALSE) data = as.matrix(trainDF[["feat1"]], drop = FALSE)
, label = trainDF[["target"]] , label = trainDF[["target"]]
, params = list(num_threads = .LGB_MAX_THREADS)
) )
dvalid1 <- lgb.Dataset( dvalid1 <- lgb.Dataset(
data = as.matrix(validDF[["feat1"]], drop = FALSE) data = as.matrix(validDF[["feat1"]], drop = FALSE)
, label = validDF[["target"]] , label = validDF[["target"]]
, params = list(num_threads = .LGB_MAX_THREADS)
) )
dvalid2 <- lgb.Dataset( dvalid2 <- lgb.Dataset(
data = as.matrix(validDF[1L:10L, "feat1"], drop = FALSE) data = as.matrix(validDF[1L:10L, "feat1"], drop = FALSE)
, label = validDF[1L:10L, "target"] , label = validDF[1L:10L, "target"]
, params = list(num_threads = .LGB_MAX_THREADS)
) )
nrounds <- 10L nrounds <- 10L
train_params <- list( train_params <- list(
...@@ -1879,6 +1970,7 @@ test_that("when early stopping is not activated, best_iter and best_score come f ...@@ -1879,6 +1970,7 @@ test_that("when early stopping is not activated, best_iter and best_score come f
, learning_rate = 1.5 , learning_rate = 1.5
, num_leaves = 5L , num_leaves = 5L
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
# example 1: two valids, neither are the training data # example 1: two valids, neither are the training data
...@@ -2020,10 +2112,12 @@ test_that("lightgbm.train() gives the correct best_score and best_iter for a met ...@@ -2020,10 +2112,12 @@ test_that("lightgbm.train() gives the correct best_score and best_iter for a met
dtrain <- lgb.Dataset( dtrain <- lgb.Dataset(
data = as.matrix(trainDF[["feat1"]], drop = FALSE) data = as.matrix(trainDF[["feat1"]], drop = FALSE)
, label = trainDF[["target"]] , label = trainDF[["target"]]
, params = list(num_threads = .LGB_MAX_THREADS)
) )
dvalid1 <- lgb.Dataset( dvalid1 <- lgb.Dataset(
data = as.matrix(validDF[1L:25L, "feat1"], drop = FALSE) data = as.matrix(validDF[1L:25L, "feat1"], drop = FALSE)
, label = validDF[1L:25L, "target"] , label = validDF[1L:25L, "target"]
, params = list(num_threads = .LGB_MAX_THREADS)
) )
nrounds <- 10L nrounds <- 10L
bst <- lgb.train( bst <- lgb.train(
...@@ -2039,6 +2133,7 @@ test_that("lightgbm.train() gives the correct best_score and best_iter for a met ...@@ -2039,6 +2133,7 @@ test_that("lightgbm.train() gives the correct best_score and best_iter for a met
, learning_rate = 1.5 , learning_rate = 1.5
, num_leaves = 5L , num_leaves = 5L
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
) )
# note that "something-random-we-would-not-hardcode" was recognized as the training # note that "something-random-we-would-not-hardcode" was recognized as the training
...@@ -2070,14 +2165,17 @@ test_that("using lightgbm() without early stopping, best_iter and best_score com ...@@ -2070,14 +2165,17 @@ test_that("using lightgbm() without early stopping, best_iter and best_score com
dtrain <- lgb.Dataset( dtrain <- lgb.Dataset(
data = as.matrix(trainDF[["feat1"]], drop = FALSE) data = as.matrix(trainDF[["feat1"]], drop = FALSE)
, label = trainDF[["target"]] , label = trainDF[["target"]]
, params = list(num_threads = .LGB_MAX_THREADS)
) )
dvalid1 <- lgb.Dataset( dvalid1 <- lgb.Dataset(
data = as.matrix(validDF[1L:25L, "feat1"], drop = FALSE) data = as.matrix(validDF[1L:25L, "feat1"], drop = FALSE)
, label = validDF[1L:25L, "target"] , label = validDF[1L:25L, "target"]
, params = list(num_threads = .LGB_MAX_THREADS)
) )
dvalid2 <- lgb.Dataset( dvalid2 <- lgb.Dataset(
data = as.matrix(validDF[26L:50L, "feat1"], drop = FALSE) data = as.matrix(validDF[26L:50L, "feat1"], drop = FALSE)
, label = validDF[26L:50L, "target"] , label = validDF[26L:50L, "target"]
, params = list(num_threads = .LGB_MAX_THREADS)
) )
nrounds <- 10L nrounds <- 10L
bst <- lightgbm( bst <- lightgbm(
...@@ -2093,6 +2191,7 @@ test_that("using lightgbm() without early stopping, best_iter and best_score com ...@@ -2093,6 +2191,7 @@ test_that("using lightgbm() without early stopping, best_iter and best_score com
, metric = "auc" , metric = "auc"
, learning_rate = 1.5 , learning_rate = 1.5
, num_leaves = 5L , num_leaves = 5L
, num_threads = .LGB_MAX_THREADS
) )
, verbose = -7L , verbose = -7L
) )
...@@ -2120,6 +2219,7 @@ test_that("lgb.cv() works when you specify both 'metric' and 'eval' with strings ...@@ -2120,6 +2219,7 @@ test_that("lgb.cv() works when you specify both 'metric' and 'eval' with strings
objective = "binary" objective = "binary"
, metric = "binary_error" , metric = "binary_error"
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, data = DTRAIN_RANDOM_CLASSIFICATION , data = DTRAIN_RANDOM_CLASSIFICATION
, nrounds = nrounds , nrounds = nrounds
...@@ -2154,6 +2254,7 @@ test_that("lgb.cv() works when you give a function for eval", { ...@@ -2154,6 +2254,7 @@ test_that("lgb.cv() works when you give a function for eval", {
objective = "binary" objective = "binary"
, metric = "None" , metric = "None"
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, data = DTRAIN_RANDOM_CLASSIFICATION , data = DTRAIN_RANDOM_CLASSIFICATION
, nfold = nfolds , nfold = nfolds
...@@ -2180,6 +2281,7 @@ test_that("If first_metric_only is TRUE, lgb.cv() decides to stop early based on ...@@ -2180,6 +2281,7 @@ test_that("If first_metric_only is TRUE, lgb.cv() decides to stop early based on
, early_stopping_rounds = early_stopping_rounds , early_stopping_rounds = early_stopping_rounds
, first_metric_only = TRUE , first_metric_only = TRUE
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, data = DTRAIN_RANDOM_REGRESSION , data = DTRAIN_RANDOM_REGRESSION
, nfold = nfolds , nfold = nfolds
...@@ -2237,6 +2339,7 @@ test_that("early stopping works with lgb.cv()", { ...@@ -2237,6 +2339,7 @@ test_that("early stopping works with lgb.cv()", {
, early_stopping_rounds = early_stopping_rounds , early_stopping_rounds = early_stopping_rounds
, first_metric_only = TRUE , first_metric_only = TRUE
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, data = DTRAIN_RANDOM_REGRESSION , data = DTRAIN_RANDOM_REGRESSION
, nfold = nfolds , nfold = nfolds
...@@ -2286,11 +2389,12 @@ test_that("lgb.cv() respects changes to logging verbosity", { ...@@ -2286,11 +2389,12 @@ test_that("lgb.cv() respects changes to logging verbosity", {
dtrain <- lgb.Dataset( dtrain <- lgb.Dataset(
data = train$data data = train$data
, label = train$label , label = train$label
, params = list(num_threads = .LGB_MAX_THREADS)
) )
# (verbose = 1) should be INFO and WARNING level logs # (verbose = 1) should be INFO and WARNING level logs
lgb_cv_logs <- capture.output({ lgb_cv_logs <- capture.output({
cv_bst <- lgb.cv( cv_bst <- lgb.cv(
params = list() params = list(num_threads = .LGB_MAX_THREADS)
, nfold = 2L , nfold = 2L
, nrounds = 5L , nrounds = 5L
, data = dtrain , data = dtrain
...@@ -2304,7 +2408,7 @@ test_that("lgb.cv() respects changes to logging verbosity", { ...@@ -2304,7 +2408,7 @@ test_that("lgb.cv() respects changes to logging verbosity", {
# (verbose = 0) should be WARNING level logs only # (verbose = 0) should be WARNING level logs only
lgb_cv_logs <- capture.output({ lgb_cv_logs <- capture.output({
cv_bst <- lgb.cv( cv_bst <- lgb.cv(
params = list() params = list(num_threads = .LGB_MAX_THREADS)
, nfold = 2L , nfold = 2L
, nrounds = 5L , nrounds = 5L
, data = dtrain , data = dtrain
...@@ -2318,7 +2422,7 @@ test_that("lgb.cv() respects changes to logging verbosity", { ...@@ -2318,7 +2422,7 @@ test_that("lgb.cv() respects changes to logging verbosity", {
# (verbose = -1) no logs # (verbose = -1) no logs
lgb_cv_logs <- capture.output({ lgb_cv_logs <- capture.output({
cv_bst <- lgb.cv( cv_bst <- lgb.cv(
params = list() params = list(num_threads = .LGB_MAX_THREADS)
, nfold = 2L , nfold = 2L
, nrounds = 5L , nrounds = 5L
, data = dtrain , data = dtrain
...@@ -2336,6 +2440,7 @@ test_that("lgb.cv() updates params based on keyword arguments", { ...@@ -2336,6 +2440,7 @@ test_that("lgb.cv() updates params based on keyword arguments", {
dtrain <- lgb.Dataset( dtrain <- lgb.Dataset(
data = matrix(rnorm(400L), ncol = 4L) data = matrix(rnorm(400L), ncol = 4L)
, label = rnorm(100L) , label = rnorm(100L)
, params = list(num_threads = .LGB_MAX_THREADS)
) )
# defaults from keyword arguments should be used if not specified in params # defaults from keyword arguments should be used if not specified in params
...@@ -2344,7 +2449,7 @@ test_that("lgb.cv() updates params based on keyword arguments", { ...@@ -2344,7 +2449,7 @@ test_that("lgb.cv() updates params based on keyword arguments", {
cv_bst <- lgb.cv( cv_bst <- lgb.cv(
data = dtrain data = dtrain
, obj = "regression" , obj = "regression"
, params = list() , params = list(num_threads = .LGB_MAX_THREADS)
, nfold = 2L , nfold = 2L
) )
}) })
...@@ -2365,6 +2470,7 @@ test_that("lgb.cv() updates params based on keyword arguments", { ...@@ -2365,6 +2470,7 @@ test_that("lgb.cv() updates params based on keyword arguments", {
, params = list( , params = list(
"verbosity" = 5L "verbosity" = 5L
, "num_iterations" = 2L , "num_iterations" = 2L
, num_threads = .LGB_MAX_THREADS
) )
, nfold = 2L , nfold = 2L
) )
...@@ -2385,6 +2491,7 @@ test_that("lgb.cv() updates params based on keyword arguments", { ...@@ -2385,6 +2491,7 @@ test_that("lgb.cv() updates params based on keyword arguments", {
, params = list( , params = list(
"verbose" = 5L "verbose" = 5L
, "num_boost_round" = 2L , "num_boost_round" = 2L
, num_threads = .LGB_MAX_THREADS
) )
, nfold = 2L , nfold = 2L
) )
...@@ -2407,6 +2514,7 @@ test_that("lgb.train() fit on linearly-relatead data improves when using linear ...@@ -2407,6 +2514,7 @@ test_that("lgb.train() fit on linearly-relatead data improves when using linear
return(lgb.Dataset( return(lgb.Dataset(
data = X data = X
, label = 2L * X + runif(nrow(X), 0L, 0.1) , label = 2L * X + runif(nrow(X), 0L, 0.1)
, params = list(num_threads = .LGB_MAX_THREADS)
)) ))
} }
...@@ -2416,6 +2524,7 @@ test_that("lgb.train() fit on linearly-relatead data improves when using linear ...@@ -2416,6 +2524,7 @@ test_that("lgb.train() fit on linearly-relatead data improves when using linear
, metric = "mse" , metric = "mse"
, seed = 0L , seed = 0L
, num_leaves = 2L , num_leaves = 2L
, num_threads = .LGB_MAX_THREADS
) )
dtrain <- .new_dataset() dtrain <- .new_dataset()
...@@ -2450,11 +2559,13 @@ test_that("lgb.train() with linear learner fails already-constructed dataset wit ...@@ -2450,11 +2559,13 @@ test_that("lgb.train() with linear learner fails already-constructed dataset wit
, metric = "mse" , metric = "mse"
, seed = 0L , seed = 0L
, num_leaves = 2L , num_leaves = 2L
, num_threads = .LGB_MAX_THREADS
) )
dtrain <- lgb.Dataset( dtrain <- lgb.Dataset(
data = matrix(rnorm(100L), ncol = 1L) data = matrix(rnorm(100L), ncol = 1L)
, label = rnorm(100L) , label = rnorm(100L)
, params = list(num_threads = .LGB_MAX_THREADS)
) )
dtrain$construct() dtrain$construct()
expect_error({ expect_error({
...@@ -2480,6 +2591,7 @@ test_that("lgb.train() works with linear learners even if Dataset has missing va ...@@ -2480,6 +2591,7 @@ test_that("lgb.train() works with linear learners even if Dataset has missing va
return(lgb.Dataset( return(lgb.Dataset(
data = X data = X
, label = 2L * X + runif(nrow(X), 0L, 0.1) , label = 2L * X + runif(nrow(X), 0L, 0.1)
, params = list(num_threads = .LGB_MAX_THREADS)
)) ))
} }
...@@ -2489,6 +2601,7 @@ test_that("lgb.train() works with linear learners even if Dataset has missing va ...@@ -2489,6 +2601,7 @@ test_that("lgb.train() works with linear learners even if Dataset has missing va
, metric = "mse" , metric = "mse"
, seed = 0L , seed = 0L
, num_leaves = 2L , num_leaves = 2L
, num_threads = .LGB_MAX_THREADS
) )
dtrain <- .new_dataset() dtrain <- .new_dataset()
...@@ -2526,6 +2639,7 @@ test_that("lgb.train() works with linear learners, bagging, and a Dataset that h ...@@ -2526,6 +2639,7 @@ test_that("lgb.train() works with linear learners, bagging, and a Dataset that h
return(lgb.Dataset( return(lgb.Dataset(
data = X data = X
, label = 2L * X + runif(nrow(X), 0L, 0.1) , label = 2L * X + runif(nrow(X), 0L, 0.1)
, params = list(num_threads = .LGB_MAX_THREADS)
)) ))
} }
...@@ -2537,6 +2651,7 @@ test_that("lgb.train() works with linear learners, bagging, and a Dataset that h ...@@ -2537,6 +2651,7 @@ test_that("lgb.train() works with linear learners, bagging, and a Dataset that h
, num_leaves = 2L , num_leaves = 2L
, bagging_freq = 1L , bagging_freq = 1L
, subsample = 0.8 , subsample = 0.8
, num_threads = .LGB_MAX_THREADS
) )
dtrain <- .new_dataset() dtrain <- .new_dataset()
...@@ -2576,6 +2691,7 @@ test_that("lgb.train() works with linear learners and data where a feature has o ...@@ -2576,6 +2691,7 @@ test_that("lgb.train() works with linear learners and data where a feature has o
, label = 2L * X[, 1L] + runif(nrow(X), 0L, 0.1) , label = 2L * X[, 1L] + runif(nrow(X), 0L, 0.1)
, params = list( , params = list(
feature_pre_filter = FALSE feature_pre_filter = FALSE
, num_threads = .LGB_MAX_THREADS
) )
)) ))
} }
...@@ -2586,6 +2702,7 @@ test_that("lgb.train() works with linear learners and data where a feature has o ...@@ -2586,6 +2702,7 @@ test_that("lgb.train() works with linear learners and data where a feature has o
, metric = "mse" , metric = "mse"
, seed = 0L , seed = 0L
, num_leaves = 2L , num_leaves = 2L
, num_threads = .LGB_MAX_THREADS
) )
dtrain <- .new_dataset() dtrain <- .new_dataset()
...@@ -2606,6 +2723,7 @@ test_that("lgb.train() works with linear learners when Dataset has categorical f ...@@ -2606,6 +2723,7 @@ test_that("lgb.train() works with linear learners when Dataset has categorical f
return(lgb.Dataset( return(lgb.Dataset(
data = X data = X
, label = 2L * X[, 1L] + runif(nrow(X), 0L, 0.1) , label = 2L * X[, 1L] + runif(nrow(X), 0L, 0.1)
, params = list(num_threads = .LGB_MAX_THREADS)
)) ))
} }
...@@ -2616,6 +2734,7 @@ test_that("lgb.train() works with linear learners when Dataset has categorical f ...@@ -2616,6 +2734,7 @@ test_that("lgb.train() works with linear learners when Dataset has categorical f
, seed = 0L , seed = 0L
, num_leaves = 2L , num_leaves = 2L
, categorical_feature = 1L , categorical_feature = 1L
, num_threads = .LGB_MAX_THREADS
) )
dtrain <- .new_dataset() dtrain <- .new_dataset()
...@@ -2682,12 +2801,13 @@ test_that("lgb.train() throws an informative error if interaction_constraints co ...@@ -2682,12 +2801,13 @@ test_that("lgb.train() throws an informative error if interaction_constraints co
test_that(paste0("lgb.train() gives same result when interaction_constraints is specified as a list of ", test_that(paste0("lgb.train() gives same result when interaction_constraints is specified as a list of ",
"character vectors, numeric vectors, or a combination"), { "character vectors, numeric vectors, or a combination"), {
set.seed(1L) set.seed(1L)
dtrain <- lgb.Dataset(train$data, label = train$label) dtrain <- lgb.Dataset(train$data, label = train$label, params = list(num_threads = .LGB_MAX_THREADS))
params <- list( params <- list(
objective = "regression" objective = "regression"
, interaction_constraints = list(c(1L, 2L), 3L) , interaction_constraints = list(c(1L, 2L), 3L)
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
bst <- lightgbm( bst <- lightgbm(
data = dtrain data = dtrain
...@@ -2701,6 +2821,7 @@ test_that(paste0("lgb.train() gives same result when interaction_constraints is ...@@ -2701,6 +2821,7 @@ test_that(paste0("lgb.train() gives same result when interaction_constraints is
objective = "regression" objective = "regression"
, interaction_constraints = list(c(cnames[[1L]], cnames[[2L]]), cnames[[3L]]) , interaction_constraints = list(c(cnames[[1L]], cnames[[2L]]), cnames[[3L]])
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
bst <- lightgbm( bst <- lightgbm(
data = dtrain data = dtrain
...@@ -2713,6 +2834,7 @@ test_that(paste0("lgb.train() gives same result when interaction_constraints is ...@@ -2713,6 +2834,7 @@ test_that(paste0("lgb.train() gives same result when interaction_constraints is
objective = "regression" objective = "regression"
, interaction_constraints = list(c(cnames[[1L]], cnames[[2L]]), 3L) , interaction_constraints = list(c(cnames[[1L]], cnames[[2L]]), 3L)
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
bst <- lightgbm( bst <- lightgbm(
data = dtrain data = dtrain
...@@ -2728,12 +2850,13 @@ test_that(paste0("lgb.train() gives same result when interaction_constraints is ...@@ -2728,12 +2850,13 @@ test_that(paste0("lgb.train() gives same result when interaction_constraints is
test_that(paste0("lgb.train() gives same results when using interaction_constraints and specifying colnames"), { test_that(paste0("lgb.train() gives same results when using interaction_constraints and specifying colnames"), {
set.seed(1L) set.seed(1L)
dtrain <- lgb.Dataset(train$data, label = train$label) dtrain <- lgb.Dataset(train$data, label = train$label, params = list(num_threads = .LGB_MAX_THREADS))
params <- list( params <- list(
objective = "regression" objective = "regression"
, interaction_constraints = list(c(1L, 2L), 3L) , interaction_constraints = list(c(1L, 2L), 3L)
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
bst <- lightgbm( bst <- lightgbm(
data = dtrain data = dtrain
...@@ -2747,6 +2870,7 @@ test_that(paste0("lgb.train() gives same results when using interaction_constrai ...@@ -2747,6 +2870,7 @@ test_that(paste0("lgb.train() gives same results when using interaction_constrai
objective = "regression" objective = "regression"
, interaction_constraints = list(c(new_colnames[1L], new_colnames[2L]), new_colnames[3L]) , interaction_constraints = list(c(new_colnames[1L], new_colnames[2L]), new_colnames[3L])
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
bst <- lightgbm( bst <- lightgbm(
data = dtrain data = dtrain
...@@ -2796,6 +2920,7 @@ test_that(paste0("lgb.train() gives same results when using interaction_constrai ...@@ -2796,6 +2920,7 @@ test_that(paste0("lgb.train() gives same results when using interaction_constrai
, categorical_feature = categorical_features , categorical_feature = categorical_features
, free_raw_data = FALSE , free_raw_data = FALSE
, colnames = c("feature_1", "feature_2", "feature_3") , colnames = c("feature_1", "feature_2", "feature_3")
, params = list(num_threads = .LGB_MAX_THREADS)
)) ))
} }
...@@ -2891,6 +3016,7 @@ for (x3_to_categorical in c(TRUE, FALSE)) { ...@@ -2891,6 +3016,7 @@ for (x3_to_categorical in c(TRUE, FALSE)) {
, monotone_constraints_method = monotone_constraints_method , monotone_constraints_method = monotone_constraints_method
, use_missing = FALSE , use_missing = FALSE
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
constrained_model <- lgb.train( constrained_model <- lgb.train(
params = params params = params
...@@ -2912,7 +3038,7 @@ test_that("lightgbm() accepts objective as function argument and under params", ...@@ -2912,7 +3038,7 @@ test_that("lightgbm() accepts objective as function argument and under params",
bst1 <- lightgbm( bst1 <- lightgbm(
data = train$data data = train$data
, label = train$label , label = train$label
, params = list(objective = "regression_l1") , params = list(objective = "regression_l1", num_threads = .LGB_MAX_THREADS)
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = VERBOSITY
) )
...@@ -2947,7 +3073,7 @@ test_that("lightgbm() prioritizes objective under params over objective as funct ...@@ -2947,7 +3073,7 @@ test_that("lightgbm() prioritizes objective under params over objective as funct
data = train$data data = train$data
, label = train$label , label = train$label
, objective = "regression" , objective = "regression"
, params = list(objective = "regression_l1") , params = list(objective = "regression_l1", num_threads = .LGB_MAX_THREADS)
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = VERBOSITY
) )
...@@ -2964,7 +3090,7 @@ test_that("lightgbm() prioritizes objective under params over objective as funct ...@@ -2964,7 +3090,7 @@ test_that("lightgbm() prioritizes objective under params over objective as funct
data = train$data data = train$data
, label = train$label , label = train$label
, objective = "regression" , objective = "regression"
, params = list(loss = "regression_l1") , params = list(loss = "regression_l1", num_threads = .LGB_MAX_THREADS)
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = VERBOSITY
) )
...@@ -2985,6 +3111,7 @@ test_that("lightgbm() accepts init_score as function argument", { ...@@ -2985,6 +3111,7 @@ test_that("lightgbm() accepts init_score as function argument", {
, objective = "binary" , objective = "binary"
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = VERBOSITY
, params = list(num_threads = .LGB_MAX_THREADS)
) )
pred1 <- predict(bst1, train$data, type = "raw") pred1 <- predict(bst1, train$data, type = "raw")
...@@ -2995,6 +3122,7 @@ test_that("lightgbm() accepts init_score as function argument", { ...@@ -2995,6 +3122,7 @@ test_that("lightgbm() accepts init_score as function argument", {
, objective = "binary" , objective = "binary"
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = VERBOSITY
, params = list(num_threads = .LGB_MAX_THREADS)
) )
pred2 <- predict(bst2, train$data, type = "raw") pred2 <- predict(bst2, train$data, type = "raw")
...@@ -3007,6 +3135,7 @@ test_that("lightgbm() defaults to 'regression' objective if objective not otherw ...@@ -3007,6 +3135,7 @@ test_that("lightgbm() defaults to 'regression' objective if objective not otherw
, label = train$label , label = train$label
, nrounds = 5L , nrounds = 5L
, verbose = VERBOSITY , verbose = VERBOSITY
, params = list(num_threads = .LGB_MAX_THREADS)
) )
expect_equal(bst$params$objective, "regression") expect_equal(bst$params$objective, "regression")
model_txt_lines <- strsplit( model_txt_lines <- strsplit(
...@@ -3081,6 +3210,7 @@ test_that("lightgbm() accepts 'weight' and 'weights'", { ...@@ -3081,6 +3210,7 @@ test_that("lightgbm() accepts 'weight' and 'weights'", {
, params = list( , params = list(
min_data_in_bin = 1L min_data_in_bin = 1L
, min_data_in_leaf = 1L , min_data_in_leaf = 1L
, num_threads = .LGB_MAX_THREADS
) )
) )
expect_equal(model$.__enclos_env__$private$train_set$get_field("weight"), w) expect_equal(model$.__enclos_env__$private$train_set$get_field("weight"), w)
...@@ -3146,6 +3276,7 @@ test_that("lightgbm() accepts 'weight' and 'weights'", { ...@@ -3146,6 +3276,7 @@ test_that("lightgbm() accepts 'weight' and 'weights'", {
, objective = "binary" , objective = "binary"
, metric = "auc" , metric = "auc"
, early_stopping_round = nrounds , early_stopping_round = nrounds
, num_threads = .LGB_MAX_THREADS
) )
if (!is.null(verbose_param)) { if (!is.null(verbose_param)) {
params[["verbose"]] <- verbose_param params[["verbose"]] <- verbose_param
...@@ -3162,6 +3293,7 @@ test_that("lightgbm() accepts 'weight' and 'weights'", { ...@@ -3162,6 +3293,7 @@ test_that("lightgbm() accepts 'weight' and 'weights'", {
train_kwargs[["data"]] <- lgb.Dataset( train_kwargs[["data"]] <- lgb.Dataset(
data = train$data data = train$data
, label = train$label , label = train$label
, params = list(num_threads = .LGB_MAX_THREADS)
) )
train_kwargs[["valids"]] <- list( train_kwargs[["valids"]] <- list(
"valid" = lgb.Dataset(data = test$data, label = test$label) "valid" = lgb.Dataset(data = test$data, label = test$label)
...@@ -3535,7 +3667,7 @@ test_that("lightgbm() changes objective='auto' appropriately", { ...@@ -3535,7 +3667,7 @@ test_that("lightgbm() changes objective='auto' appropriately", {
data("mtcars") data("mtcars")
y <- mtcars$mpg y <- mtcars$mpg
x <- as.matrix(mtcars[, -1L]) x <- as.matrix(mtcars[, -1L])
model <- lightgbm(x, y, objective = "auto", verbose = VERBOSITY, nrounds = 5L) model <- lightgbm(x, y, objective = "auto", verbose = VERBOSITY, nrounds = 5L, num_threads = .LGB_MAX_THREADS)
expect_equal(model$params$objective, "regression") expect_equal(model$params$objective, "regression")
model_txt_lines <- strsplit( model_txt_lines <- strsplit(
x = model$save_model_to_string() x = model$save_model_to_string()
...@@ -3548,7 +3680,7 @@ test_that("lightgbm() changes objective='auto' appropriately", { ...@@ -3548,7 +3680,7 @@ test_that("lightgbm() changes objective='auto' appropriately", {
# Binary classification # Binary classification
x <- train$data x <- train$data
y <- factor(train$label) y <- factor(train$label)
model <- lightgbm(x, y, objective = "auto", verbose = VERBOSITY, nrounds = 5L) model <- lightgbm(x, y, objective = "auto", verbose = VERBOSITY, nrounds = 5L, num_threads = .LGB_MAX_THREADS)
expect_equal(model$params$objective, "binary") expect_equal(model$params$objective, "binary")
model_txt_lines <- strsplit( model_txt_lines <- strsplit(
x = model$save_model_to_string() x = model$save_model_to_string()
...@@ -3561,7 +3693,7 @@ test_that("lightgbm() changes objective='auto' appropriately", { ...@@ -3561,7 +3693,7 @@ test_that("lightgbm() changes objective='auto' appropriately", {
data("iris") data("iris")
y <- factor(iris$Species) y <- factor(iris$Species)
x <- as.matrix(iris[, -5L]) x <- as.matrix(iris[, -5L])
model <- lightgbm(x, y, objective = "auto", verbose = VERBOSITY, nrounds = 5L) model <- lightgbm(x, y, objective = "auto", verbose = VERBOSITY, nrounds = 5L, num_threads = .LGB_MAX_THREADS)
expect_equal(model$params$objective, "multiclass") expect_equal(model$params$objective, "multiclass")
expect_equal(model$params$num_class, 3L) expect_equal(model$params$num_class, 3L)
model_txt_lines <- strsplit( model_txt_lines <- strsplit(
...@@ -3576,7 +3708,14 @@ test_that("lightgbm() determines number of classes for non-default multiclass ob ...@@ -3576,7 +3708,14 @@ test_that("lightgbm() determines number of classes for non-default multiclass ob
data("iris") data("iris")
y <- factor(iris$Species) y <- factor(iris$Species)
x <- as.matrix(iris[, -5L]) x <- as.matrix(iris[, -5L])
model <- lightgbm(x, y, objective = "multiclassova", verbose = VERBOSITY, nrounds = 5L) model <- lightgbm(
x
, y
, objective = "multiclassova"
, verbose = VERBOSITY
, nrounds = 5L
, num_threads = .LGB_MAX_THREADS
)
expect_equal(model$params$objective, "multiclassova") expect_equal(model$params$objective, "multiclassova")
expect_equal(model$params$num_class, 3L) expect_equal(model$params$num_class, 3L)
model_txt_lines <- strsplit( model_txt_lines <- strsplit(
...@@ -3592,7 +3731,7 @@ test_that("lightgbm() doesn't accept binary classification with non-binary facto ...@@ -3592,7 +3731,7 @@ test_that("lightgbm() doesn't accept binary classification with non-binary facto
y <- factor(iris$Species) y <- factor(iris$Species)
x <- as.matrix(iris[, -5L]) x <- as.matrix(iris[, -5L])
expect_error({ expect_error({
lightgbm(x, y, objective = "binary", verbose = VERBOSITY, nrounds = 5L) lightgbm(x, y, objective = "binary", verbose = VERBOSITY, nrounds = 5L, num_threads = .LGB_MAX_THREADS)
}, regexp = "Factors with >2 levels as labels only allowed for multi-class objectives") }, regexp = "Factors with >2 levels as labels only allowed for multi-class objectives")
}) })
...@@ -3603,7 +3742,7 @@ test_that("lightgbm() doesn't accept multi-class classification with binary fact ...@@ -3603,7 +3742,7 @@ test_that("lightgbm() doesn't accept multi-class classification with binary fact
y <- factor(y) y <- factor(y)
x <- as.matrix(iris[, -5L]) x <- as.matrix(iris[, -5L])
expect_error({ expect_error({
lightgbm(x, y, objective = "multiclass", verbose = VERBOSITY, nrounds = 5L) lightgbm(x, y, objective = "multiclass", verbose = VERBOSITY, nrounds = 5L, num_threads = .LGB_MAX_THREADS)
}, regexp = "Two-level factors as labels only allowed for objective='binary'") }, regexp = "Two-level factors as labels only allowed for objective='binary'")
}) })
...@@ -3611,7 +3750,7 @@ test_that("lightgbm() model predictions retain factor levels for multiclass clas ...@@ -3611,7 +3750,7 @@ test_that("lightgbm() model predictions retain factor levels for multiclass clas
data("iris") data("iris")
y <- factor(iris$Species) y <- factor(iris$Species)
x <- as.matrix(iris[, -5L]) x <- as.matrix(iris[, -5L])
model <- lightgbm(x, y, objective = "auto", verbose = VERBOSITY, nrounds = 5L) model <- lightgbm(x, y, objective = "auto", verbose = VERBOSITY, nrounds = 5L, num_threads = .LGB_MAX_THREADS)
pred <- predict(model, x, type = "class") pred <- predict(model, x, type = "class")
expect_true(is.factor(pred)) expect_true(is.factor(pred))
...@@ -3630,7 +3769,7 @@ test_that("lightgbm() model predictions retain factor levels for binary classifi ...@@ -3630,7 +3769,7 @@ test_that("lightgbm() model predictions retain factor levels for binary classifi
y[y == "setosa"] <- "versicolor" y[y == "setosa"] <- "versicolor"
y <- factor(y) y <- factor(y)
x <- as.matrix(iris[, -5L]) x <- as.matrix(iris[, -5L])
model <- lightgbm(x, y, objective = "auto", verbose = VERBOSITY, nrounds = 5L) model <- lightgbm(x, y, objective = "auto", verbose = VERBOSITY, nrounds = 5L, num_threads = .LGB_MAX_THREADS)
pred <- predict(model, x, type = "class") pred <- predict(model, x, type = "class")
expect_true(is.factor(pred)) expect_true(is.factor(pred))
......
...@@ -39,6 +39,7 @@ param <- list( ...@@ -39,6 +39,7 @@ param <- list(
, objective = logregobj , objective = logregobj
, metric = "auc" , metric = "auc"
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
num_round <- 10L num_round <- 10L
...@@ -54,6 +55,7 @@ test_that("using a custom objective, custom eval, and no other metrics works", { ...@@ -54,6 +55,7 @@ test_that("using a custom objective, custom eval, and no other metrics works", {
num_leaves = 8L num_leaves = 8L
, learning_rate = 1.0 , learning_rate = 1.0
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, data = dtrain , data = dtrain
, nrounds = 4L , nrounds = 4L
......
...@@ -133,7 +133,7 @@ test_that("Dataset$set_reference() updates categorical_feature, colnames, and pr ...@@ -133,7 +133,7 @@ test_that("Dataset$set_reference() updates categorical_feature, colnames, and pr
dtrain$construct() dtrain$construct()
bst <- Booster$new( bst <- Booster$new(
train_set = dtrain train_set = dtrain
, params = list(verbose = -1L) , params = list(verbose = -1L, num_threads = .LGB_MAX_THREADS)
) )
dtrain$.__enclos_env__$private$predictor <- bst$to_predictor() dtrain$.__enclos_env__$private$predictor <- bst$to_predictor()
...@@ -394,6 +394,7 @@ test_that("lgb.Dataset: should be able to run lgb.train() immediately after usin ...@@ -394,6 +394,7 @@ test_that("lgb.Dataset: should be able to run lgb.train() immediately after usin
, num_leaves = 5L , num_leaves = 5L
, learning_rate = 1.0 , learning_rate = 1.0
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
# should be able to train right away # should be able to train right away
...@@ -429,6 +430,7 @@ test_that("lgb.Dataset: should be able to run lgb.cv() immediately after using l ...@@ -429,6 +430,7 @@ test_that("lgb.Dataset: should be able to run lgb.cv() immediately after using l
, learning_rate = 1.0 , learning_rate = 1.0
, num_iterations = 5L , num_iterations = 5L
, verbosity = VERBOSITY , verbosity = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
# should be able to train right away # should be able to train right away
......
...@@ -27,6 +27,7 @@ test_that("learning-to-rank with lgb.train() works as expected", { ...@@ -27,6 +27,7 @@ test_that("learning-to-rank with lgb.train() works as expected", {
, lambdarank_truncation_level = 3L , lambdarank_truncation_level = 3L
, learning_rate = 0.001 , learning_rate = 0.001
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
model <- lgb.train( model <- lgb.train(
params = params params = params
...@@ -91,6 +92,7 @@ test_that("learning-to-rank with lgb.cv() works as expected", { ...@@ -91,6 +92,7 @@ test_that("learning-to-rank with lgb.cv() works as expected", {
, min_data = 1L , min_data = 1L
, learning_rate = 0.01 , learning_rate = 0.01
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
nfold <- 4L nfold <- 4L
nrounds <- 10L nrounds <- 10L
......
...@@ -13,6 +13,7 @@ test_that("Booster$finalize() should not fail", { ...@@ -13,6 +13,7 @@ test_that("Booster$finalize() should not fail", {
data = dtrain data = dtrain
, params = list( , params = list(
objective = "regression" objective = "regression"
, num_threads = .LGB_MAX_THREADS
) )
, verbose = VERBOSITY , verbose = VERBOSITY
, nrounds = 3L , nrounds = 3L
...@@ -66,6 +67,7 @@ test_that("lgb.get.eval.result() should throw an informative error for incorrect ...@@ -66,6 +67,7 @@ test_that("lgb.get.eval.result() should throw an informative error for incorrect
, min_data = 1L , min_data = 1L
, learning_rate = 1.0 , learning_rate = 1.0
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, data = dtrain , data = dtrain
, nrounds = 5L , nrounds = 5L
...@@ -185,6 +187,7 @@ test_that("Loading a Booster from a text file works", { ...@@ -185,6 +187,7 @@ test_that("Loading a Booster from a text file works", {
, learning_rate = 1.0 , learning_rate = 1.0
, objective = "binary" , objective = "binary"
, verbosity = VERBOSITY , verbosity = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
bst <- lightgbm( bst <- lightgbm(
data = as.matrix(train$data) data = as.matrix(train$data)
...@@ -227,6 +230,7 @@ test_that("boosters with linear models at leaves can be written to text file and ...@@ -227,6 +230,7 @@ test_that("boosters with linear models at leaves can be written to text file and
, metric = "mse" , metric = "mse"
, seed = 0L , seed = 0L
, num_leaves = 2L , num_leaves = 2L
, num_threads = .LGB_MAX_THREADS
) )
bst <- lgb.train( bst <- lgb.train(
...@@ -268,6 +272,7 @@ test_that("Loading a Booster from a string works", { ...@@ -268,6 +272,7 @@ test_that("Loading a Booster from a string works", {
, learning_rate = 1.0 , learning_rate = 1.0
, objective = "binary" , objective = "binary"
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, nrounds = 2L , nrounds = 2L
) )
...@@ -299,6 +304,7 @@ test_that("Saving a large model to string should work", { ...@@ -299,6 +304,7 @@ test_that("Saving a large model to string should work", {
num_leaves = 100L num_leaves = 100L
, learning_rate = 0.01 , learning_rate = 0.01
, objective = "binary" , objective = "binary"
, num_threads = .LGB_MAX_THREADS
) )
, nrounds = 500L , nrounds = 500L
, verbose = VERBOSITY , verbose = VERBOSITY
...@@ -342,6 +348,7 @@ test_that("Saving a large model to JSON should work", { ...@@ -342,6 +348,7 @@ test_that("Saving a large model to JSON should work", {
num_leaves = 100L num_leaves = 100L
, learning_rate = 0.01 , learning_rate = 0.01
, objective = "binary" , objective = "binary"
, num_threads = .LGB_MAX_THREADS
) )
, nrounds = 200L , nrounds = 200L
, verbose = VERBOSITY , verbose = VERBOSITY
...@@ -372,6 +379,7 @@ test_that("If a string and a file are both passed to lgb.load() the file is used ...@@ -372,6 +379,7 @@ test_that("If a string and a file are both passed to lgb.load() the file is used
, learning_rate = 1.0 , learning_rate = 1.0
, objective = "binary" , objective = "binary"
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, nrounds = 2L , nrounds = 2L
) )
...@@ -406,6 +414,7 @@ test_that("Creating a Booster from a Dataset should work", { ...@@ -406,6 +414,7 @@ test_that("Creating a Booster from a Dataset should work", {
params = list( params = list(
objective = "binary" objective = "binary"
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
), ),
train_set = dtrain train_set = dtrain
) )
...@@ -427,6 +436,7 @@ test_that("Creating a Booster from a Dataset with an existing predictor should w ...@@ -427,6 +436,7 @@ test_that("Creating a Booster from a Dataset with an existing predictor should w
, learning_rate = 1.0 , learning_rate = 1.0
, objective = "binary" , objective = "binary"
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, nrounds = nrounds , nrounds = nrounds
) )
...@@ -440,6 +450,7 @@ test_that("Creating a Booster from a Dataset with an existing predictor should w ...@@ -440,6 +450,7 @@ test_that("Creating a Booster from a Dataset with an existing predictor should w
train_set = dtest train_set = dtest
, params = list( , params = list(
verbose = VERBOSITY verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
) )
expect_true(lgb.is.Booster(bst)) expect_true(lgb.is.Booster(bst))
...@@ -463,6 +474,7 @@ test_that("Booster$eval() should work on a Dataset stored in a binary file", { ...@@ -463,6 +474,7 @@ test_that("Booster$eval() should work on a Dataset stored in a binary file", {
, metric = "l2" , metric = "l2"
, num_leaves = 4L , num_leaves = 4L
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, data = dtrain , data = dtrain
, nrounds = 2L , nrounds = 2L
...@@ -492,7 +504,7 @@ test_that("Booster$eval() should work on a Dataset stored in a binary file", { ...@@ -492,7 +504,7 @@ test_that("Booster$eval() should work on a Dataset stored in a binary file", {
eval_from_file <- bst$eval( eval_from_file <- bst$eval(
data = lgb.Dataset( data = lgb.Dataset(
data = test_file data = test_file
, params = list(verbose = VERBOSITY) , params = list(verbose = VERBOSITY, num_threads = .LGB_MAX_THREADS)
)$construct() )$construct()
, name = "test" , name = "test"
) )
...@@ -521,6 +533,7 @@ test_that("Booster$rollback_one_iter() should work as expected", { ...@@ -521,6 +533,7 @@ test_that("Booster$rollback_one_iter() should work as expected", {
, learning_rate = 1.0 , learning_rate = 1.0
, objective = "binary" , objective = "binary"
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, nrounds = nrounds , nrounds = nrounds
) )
...@@ -555,6 +568,7 @@ test_that("Booster$update() passing a train_set works as expected", { ...@@ -555,6 +568,7 @@ test_that("Booster$update() passing a train_set works as expected", {
, learning_rate = 1.0 , learning_rate = 1.0
, objective = "binary" , objective = "binary"
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, nrounds = nrounds , nrounds = nrounds
) )
...@@ -579,6 +593,7 @@ test_that("Booster$update() passing a train_set works as expected", { ...@@ -579,6 +593,7 @@ test_that("Booster$update() passing a train_set works as expected", {
, learning_rate = 1.0 , learning_rate = 1.0
, objective = "binary" , objective = "binary"
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, nrounds = nrounds + 1L , nrounds = nrounds + 1L
) )
...@@ -604,6 +619,7 @@ test_that("Booster$update() throws an informative error if you provide a non-Dat ...@@ -604,6 +619,7 @@ test_that("Booster$update() throws an informative error if you provide a non-Dat
, learning_rate = 1.0 , learning_rate = 1.0
, objective = "binary" , objective = "binary"
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, nrounds = nrounds , nrounds = nrounds
) )
...@@ -631,6 +647,7 @@ test_that("Booster should store parameters and Booster$reset_parameter() should ...@@ -631,6 +647,7 @@ test_that("Booster should store parameters and Booster$reset_parameter() should
, boosting = "gbdt" , boosting = "gbdt"
, num_class = 5L , num_class = 5L
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
bst <- Booster$new( bst <- Booster$new(
params = params params = params
...@@ -658,6 +675,7 @@ test_that("Booster$params should include dataset params, before and after Booste ...@@ -658,6 +675,7 @@ test_that("Booster$params should include dataset params, before and after Booste
, max_depth = 4L , max_depth = 4L
, bagging_fraction = 0.8 , bagging_fraction = 0.8
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
bst <- Booster$new( bst <- Booster$new(
params = params params = params
...@@ -670,6 +688,7 @@ test_that("Booster$params should include dataset params, before and after Booste ...@@ -670,6 +688,7 @@ test_that("Booster$params should include dataset params, before and after Booste
, max_depth = 4L , max_depth = 4L
, bagging_fraction = 0.8 , bagging_fraction = 0.8
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
, max_bin = 17L , max_bin = 17L
) )
) )
...@@ -681,6 +700,7 @@ test_that("Booster$params should include dataset params, before and after Booste ...@@ -681,6 +700,7 @@ test_that("Booster$params should include dataset params, before and after Booste
, max_depth = 4L , max_depth = 4L
, bagging_fraction = 0.9 , bagging_fraction = 0.9
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
, max_bin = 17L , max_bin = 17L
) )
expect_identical(ret_bst$params, expected_params) expect_identical(ret_bst$params, expected_params)
...@@ -699,6 +719,7 @@ test_that("Saving a model with different feature importance types works", { ...@@ -699,6 +719,7 @@ test_that("Saving a model with different feature importance types works", {
, learning_rate = 1.0 , learning_rate = 1.0
, objective = "binary" , objective = "binary"
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, nrounds = 2L , nrounds = 2L
) )
...@@ -754,6 +775,7 @@ test_that("Saving a model with unknown importance type fails", { ...@@ -754,6 +775,7 @@ test_that("Saving a model with unknown importance type fails", {
, learning_rate = 1.0 , learning_rate = 1.0
, objective = "binary" , objective = "binary"
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
, nrounds = 2L , nrounds = 2L
) )
...@@ -789,6 +811,7 @@ test_that("all parameters are stored correctly with save_model_to_string()", { ...@@ -789,6 +811,7 @@ test_that("all parameters are stored correctly with save_model_to_string()", {
params = list( params = list(
objective = "regression" objective = "regression"
, metric = "l2" , metric = "l2"
, num_threads = .LGB_MAX_THREADS
) )
, data = dtrain , data = dtrain
, nrounds = nrounds , nrounds = nrounds
...@@ -845,6 +868,7 @@ test_that("early_stopping, num_iterations are stored correctly in model string e ...@@ -845,6 +868,7 @@ test_that("early_stopping, num_iterations are stored correctly in model string e
, n_iter = n_iter , n_iter = n_iter
, early_stopping_round = early_stopping_round , early_stopping_round = early_stopping_round
, n_iter_no_change = n_iter_no_change , n_iter_no_change = n_iter_no_change
, num_threads = .LGB_MAX_THREADS
) )
bst <- lgb.train( bst <- lgb.train(
...@@ -884,6 +908,7 @@ test_that("Booster: method calls Booster with a null handle should raise an info ...@@ -884,6 +908,7 @@ test_that("Booster: method calls Booster with a null handle should raise an info
objective = "regression" objective = "regression"
, metric = "l2" , metric = "l2"
, num_leaves = 8L , num_leaves = 8L
, num_threads = .LGB_MAX_THREADS
) )
, data = dtrain , data = dtrain
, verbose = VERBOSITY , verbose = VERBOSITY
...@@ -1074,6 +1099,7 @@ test_that("lgb.cv() correctly handles passing through params to the model file", ...@@ -1074,6 +1099,7 @@ test_that("lgb.cv() correctly handles passing through params to the model file",
, early_stopping_round = early_stopping_round , early_stopping_round = early_stopping_round
, n_iter_no_change = n_iter_no_change , n_iter_no_change = n_iter_no_change
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
cv_bst <- lgb.cv( cv_bst <- lgb.cv(
...@@ -1118,6 +1144,7 @@ test_that("params (including dataset params) should be stored in .rds file for B ...@@ -1118,6 +1144,7 @@ test_that("params (including dataset params) should be stored in .rds file for B
, max_depth = 4L , max_depth = 4L
, bagging_fraction = 0.8 , bagging_fraction = 0.8
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
bst <- Booster$new( bst <- Booster$new(
params = params params = params
...@@ -1134,6 +1161,7 @@ test_that("params (including dataset params) should be stored in .rds file for B ...@@ -1134,6 +1161,7 @@ test_that("params (including dataset params) should be stored in .rds file for B
, max_depth = 4L , max_depth = 4L
, bagging_fraction = 0.8 , bagging_fraction = 0.8
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
, max_bin = 17L , max_bin = 17L
) )
) )
...@@ -1153,6 +1181,7 @@ test_that("params (including dataset params) should be stored in .rds file for B ...@@ -1153,6 +1181,7 @@ test_that("params (including dataset params) should be stored in .rds file for B
, max_depth = 4L , max_depth = 4L
, bagging_fraction = 0.8 , bagging_fraction = 0.8
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
bst <- Booster$new( bst <- Booster$new(
params = params params = params
...@@ -1169,6 +1198,7 @@ test_that("params (including dataset params) should be stored in .rds file for B ...@@ -1169,6 +1198,7 @@ test_that("params (including dataset params) should be stored in .rds file for B
, max_depth = 4L , max_depth = 4L
, bagging_fraction = 0.8 , bagging_fraction = 0.8
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
, max_bin = 17L , max_bin = 17L
) )
) )
...@@ -1184,6 +1214,7 @@ test_that("Handle is automatically restored when calling predict", { ...@@ -1184,6 +1214,7 @@ test_that("Handle is automatically restored when calling predict", {
, params = list( , params = list(
verbose = VERBOSITY verbose = VERBOSITY
) )
, num_threads = .LGB_MAX_THREADS
) )
bst_file <- tempfile(fileext = ".rds") bst_file <- tempfile(fileext = ".rds")
saveRDS(bst, file = bst_file) saveRDS(bst, file = bst_file)
...@@ -1209,6 +1240,7 @@ test_that("boosters with linear models at leaves work with saveRDS.lgb.Booster a ...@@ -1209,6 +1240,7 @@ test_that("boosters with linear models at leaves work with saveRDS.lgb.Booster a
, metric = "mse" , metric = "mse"
, seed = 0L , seed = 0L
, num_leaves = 2L , num_leaves = 2L
, num_threads = .LGB_MAX_THREADS
) )
bst <- lgb.train( bst <- lgb.train(
...@@ -1248,6 +1280,7 @@ test_that("boosters with linear models at leaves can be written to RDS and re-lo ...@@ -1248,6 +1280,7 @@ test_that("boosters with linear models at leaves can be written to RDS and re-lo
, metric = "mse" , metric = "mse"
, seed = 0L , seed = 0L
, num_leaves = 2L , num_leaves = 2L
, num_threads = .LGB_MAX_THREADS
) )
bst <- lgb.train( bst <- lgb.train(
...@@ -1344,6 +1377,7 @@ test_that("Booster's print, show, and summary work correctly", { ...@@ -1344,6 +1377,7 @@ test_that("Booster's print, show, and summary work correctly", {
params = list( params = list(
objective = "regression" objective = "regression"
, min_data_in_leaf = 1L , min_data_in_leaf = 1L
, num_threads = .LGB_MAX_THREADS
) )
, data = lgb.Dataset( , data = lgb.Dataset(
as.matrix(mtcars[, -1L]) as.matrix(mtcars[, -1L])
...@@ -1359,7 +1393,7 @@ test_that("Booster's print, show, and summary work correctly", { ...@@ -1359,7 +1393,7 @@ test_that("Booster's print, show, and summary work correctly", {
data("iris") data("iris")
model <- lgb.train( model <- lgb.train(
params = list(objective = "multiclass", num_class = 3L) params = list(objective = "multiclass", num_class = 3L, num_threads = .LGB_MAX_THREADS)
, data = lgb.Dataset( , data = lgb.Dataset(
as.matrix(iris[, -5L]) as.matrix(iris[, -5L])
, label = as.numeric(factor(iris$Species)) - 1.0 , label = as.numeric(factor(iris$Species)) - 1.0
...@@ -1399,6 +1433,7 @@ test_that("Booster's print, show, and summary work correctly", { ...@@ -1399,6 +1433,7 @@ test_that("Booster's print, show, and summary work correctly", {
, eval = .evalerror , eval = .evalerror
, verbose = VERBOSITY , verbose = VERBOSITY
, nrounds = 5L , nrounds = 5L
, params = list(num_threads = .LGB_MAX_THREADS)
) )
.check_methods_work(model) .check_methods_work(model)
...@@ -1410,6 +1445,7 @@ test_that("LGBM_BoosterGetNumFeature_R returns correct outputs", { ...@@ -1410,6 +1445,7 @@ test_that("LGBM_BoosterGetNumFeature_R returns correct outputs", {
params = list( params = list(
objective = "regression" objective = "regression"
, min_data_in_leaf = 1L , min_data_in_leaf = 1L
, num_threads = .LGB_MAX_THREADS
) )
, data = lgb.Dataset( , data = lgb.Dataset(
as.matrix(mtcars[, -1L]) as.matrix(mtcars[, -1L])
......
...@@ -31,6 +31,7 @@ test_that("lgb.intereprete works as expected for binary classification", { ...@@ -31,6 +31,7 @@ test_that("lgb.intereprete works as expected for binary classification", {
, min_data_in_leaf = 1L , min_data_in_leaf = 1L
, min_sum_hessian_in_leaf = 1.0 , min_sum_hessian_in_leaf = 1.0
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
model <- lgb.train( model <- lgb.train(
params = params params = params
...@@ -83,6 +84,7 @@ test_that("lgb.intereprete works as expected for multiclass classification", { ...@@ -83,6 +84,7 @@ test_that("lgb.intereprete works as expected for multiclass classification", {
, learning_rate = 0.00001 , learning_rate = 0.00001
, min_data = 1L , min_data = 1L
, verbose = VERBOSITY , verbose = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
model <- lgb.train( model <- lgb.train(
params = params params = params
......
...@@ -14,6 +14,7 @@ test_that("lgb.plot.importance() should run without error for well-formed inputs ...@@ -14,6 +14,7 @@ test_that("lgb.plot.importance() should run without error for well-formed inputs
, min_data_in_leaf = 1L , min_data_in_leaf = 1L
, min_sum_hessian_in_leaf = 1.0 , min_sum_hessian_in_leaf = 1.0
, verbosity = VERBOSITY , verbosity = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
model <- lgb.train(params, dtrain, 3L) model <- lgb.train(params, dtrain, 3L)
tree_imp <- lgb.importance(model, percentage = TRUE) tree_imp <- lgb.importance(model, percentage = TRUE)
......
...@@ -31,6 +31,7 @@ test_that("lgb.plot.interepretation works as expected for binary classification" ...@@ -31,6 +31,7 @@ test_that("lgb.plot.interepretation works as expected for binary classification"
, min_data_in_leaf = 1L , min_data_in_leaf = 1L
, min_sum_hessian_in_leaf = 1.0 , min_sum_hessian_in_leaf = 1.0
, verbosity = VERBOSITY , verbosity = VERBOSITY
, num_threads = .LGB_MAX_THREADS
) )
model <- lgb.train( model <- lgb.train(
params = params params = params
...@@ -80,6 +81,7 @@ test_that("lgb.plot.interepretation works as expected for multiclass classificat ...@@ -80,6 +81,7 @@ test_that("lgb.plot.interepretation works as expected for multiclass classificat
, num_class = 3L , num_class = 3L
, learning_rate = 0.00001 , learning_rate = 0.00001
, min_data = 1L , min_data = 1L
, num_threads = .LGB_MAX_THREADS
) )
model <- lgb.train( model <- lgb.train(
params = params params = params
......
...@@ -20,6 +20,7 @@ test_that("Feature penalties work properly", { ...@@ -20,6 +20,7 @@ test_that("Feature penalties work properly", {
, objective = "binary" , objective = "binary"
, feature_penalty = paste0(feature_penalties, collapse = ",") , feature_penalty = paste0(feature_penalties, collapse = ",")
, metric = "binary_error" , metric = "binary_error"
, num_threads = .LGB_MAX_THREADS
) )
, nrounds = 5L , nrounds = 5L
, verbose = -1L , verbose = -1L
...@@ -97,6 +98,7 @@ test_that("training should warn if you use 'dart' boosting, specified with 'boos ...@@ -97,6 +98,7 @@ test_that("training should warn if you use 'dart' boosting, specified with 'boos
, learning_rate = 0.05 , learning_rate = 0.05
, objective = "binary" , objective = "binary"
, metric = "binary_error" , metric = "binary_error"
, num_threads = .LGB_MAX_THREADS
) )
params[[boosting_param]] <- "dart" params[[boosting_param]] <- "dart"
expect_warning({ expect_warning({
......
...@@ -9,7 +9,7 @@ test_that("Gamma regression reacts on 'weight'", { ...@@ -9,7 +9,7 @@ test_that("Gamma regression reacts on 'weight'", {
y <- X[, 1L] + X[, 2L] + runif(n) y <- X[, 1L] + X[, 2L] + runif(n)
X_pred <- X[1L:5L, ] X_pred <- X[1L:5L, ]
params <- list(objective = "gamma") params <- list(objective = "gamma", num_threads = .LGB_MAX_THREADS)
# Unweighted # Unweighted
dtrain <- lgb.Dataset(X, label = y) dtrain <- lgb.Dataset(X, label = y)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment