tianlh / LightGBM-DCU · Commits · 44928d3a

Unverified commit 44928d3a, authored Jul 21, 2023 by James Lamb; committed by GitHub on Jul 21, 2023.

[R-package] consolidate testing constants in helpers file (#5992)

parent 8967debe
Showing 11 changed files with 232 additions and 273 deletions (+232 −273):
R-package/tests/testthat/helper.R                        +19  −0
R-package/tests/testthat/test_Predictor.R                +19  −25
R-package/tests/testthat/test_basic.R                    +115 −127
R-package/tests/testthat/test_custom_objective.R         +5   −11
R-package/tests/testthat/test_dataset.R                  +8   −12
R-package/tests/testthat/test_learning_to_rank.R         +10  −19
R-package/tests/testthat/test_lgb.Booster.R              +47  −54
R-package/tests/testthat/test_lgb.interprete.R           +2   −6
R-package/tests/testthat/test_lgb.plot.importance.R      +1   −5
R-package/tests/testthat/test_lgb.plot.interpretation.R  +2   −6
R-package/tests/testthat/test_weighted_loss.R            +4   −8
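The substance of the change: each of these test files previously defined its own copies of constants such as VERBOSITY and TOLERANCE at the top of the file. The commit moves them into R-package/tests/testthat/helper.R under .LGB_-prefixed names (.LGB_VERBOSITY, .LGB_NUMERIC_TOLERANCE, and so on), defined once and shared, relying on the fact that testthat sources every tests/testthat/helper*.R file before running any test file. A minimal self-contained sketch of that pattern (the constant and test below are hypothetical, not part of this diff):

library(testthat)

# In a package this definition would live in tests/testthat/helper.R, which
# testthat sources before any test_*.R file runs; it is inlined here so the
# sketch can be run as a single script.
.MY_NUMERIC_TOLERANCE <- 1e-6  # hypothetical shared constant

test_that("shared constants are visible inside tests", {
  observed <- 1.0 / 3.0
  # compare against a hard-coded expected value, allowing for floating-point drift
  expect_true(abs(observed - 0.3333333) < .MY_NUMERIC_TOLERANCE)
})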
R-package/tests/testthat/helper.R

 # ref for this file:
 #
+# * https://r-pkgs.org/testing-design.html#testthat-helper-files
 # * https://r-pkgs.org/testing-design.html#testthat-setup-files

 # LightGBM-internal fix to comply with CRAN policy of only using up to 2 threads in tests and examples.

@@ -10,3 +11,21 @@
 # the check farm is a shared resource and will typically be running many checks simultaneously.
 #
 .LGB_MAX_THREADS <- 2L
+
+# by default, how much should results in tests be allowed to differ from hard-coded expected numbers?
+.LGB_NUMERIC_TOLERANCE <- 1e-6
+
+# are the tests running on Windows?
+.LGB_ON_WINDOWS <- .Platform$OS.type == "windows"
+.LGB_ON_32_BIT_WINDOWS <- .LGB_ON_WINDOWS && .Machine$sizeof.pointer != 8L
+
+# are the tests running in a UTF-8 locale?
+.LGB_UTF8_LOCALE <- all(endsWith(Sys.getlocale(category = "LC_CTYPE"), "UTF-8"))
+
+# control how loud LightGBM's logger is in tests
+.LGB_VERBOSITY <- as.integer(Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1"))
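One detail worth noting in the new helper.R: the second argument of Sys.getenv() is the fallback returned when the variable is unset, so .LGB_VERBOSITY defaults to -1 (silent) unless a developer opts into louder logging. A small sketch of that behavior (the value "1" is just an illustration):

# With LIGHTGBM_TEST_VERBOSITY unset, the fallback "-1" is returned:
as.integer(Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1"))  # -1

# Opting into louder LightGBM logs for one session before running the suite:
Sys.setenv(LIGHTGBM_TEST_VERBOSITY = "1")
as.integer(Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1"))  # 1

# Restore the default behavior:
Sys.unsetenv("LIGHTGBM_TEST_VERBOSITY")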
R-package/tests/testthat/test_Predictor.R

 library(Matrix)

-VERBOSITY <- as.integer(Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1"))
-TOLERANCE <- 1e-6

 test_that("Predictor$finalize() should not fail", {
   X <- as.matrix(as.integer(iris[, "Species"]), ncol = 1L)
   y <- iris[["Sepal.Length"]]

@@ -16,7 +10,7 @@ test_that("Predictor$finalize() should not fail", {
       objective = "regression"
       , num_threads = .LGB_MAX_THREADS
     )
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
     , nrounds = 3L
   )
   model_file <- tempfile(fileext = ".model")

@@ -45,7 +39,7 @@ test_that("predictions do not fail for integer input", {
       objective = "regression"
       , num_threads = .LGB_MAX_THREADS
     )
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
     , nrounds = 3L
   )
   X_double <- X[c(1L, 51L, 101L), , drop = FALSE]

@@ -78,7 +72,7 @@ test_that("start_iteration works correctly", {
       num_leaves = 4L
       , learning_rate = 0.6
       , objective = "binary"
-      , verbosity = VERBOSITY
+      , verbosity = .LGB_VERBOSITY
       , num_threads = .LGB_MAX_THREADS
     )
     , nrounds = 50L

@@ -128,7 +122,7 @@ test_that("Feature contributions from sparse inputs produce sparse outputs", {
     data = dtrain
     , obj = "regression"
     , nrounds = 5L
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
     , params = list(min_data_in_leaf = 5L, num_threads = .LGB_MAX_THREADS)
   )

@@ -159,7 +153,7 @@ test_that("Sparse feature contribution predictions do not take inputs with wrong
     data = dtrain
     , obj = "regression"
     , nrounds = 5L
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
     , params = list(min_data_in_leaf = 5L, num_threads = .LGB_MAX_THREADS)
   )

@@ -189,7 +183,7 @@ test_that("Feature contribution predictions do not take non-general CSR or CSC i
     data = dtrain
     , obj = "regression"
     , nrounds = 5L
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
     , params = list(min_data_in_leaf = 5L, num_threads = .LGB_MAX_THREADS)
   )

@@ -217,14 +211,14 @@ test_that("predict() params should override keyword argument for raw-score predi
       , num_threads = .LGB_MAX_THREADS
     )
     , nrounds = 10L
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
   )

   # check that the predictions from predict.lgb.Booster() really look like raw score predictions
   preds_prob <- predict(bst, X)
   preds_raw_s3_keyword <- predict(bst, X, type = "raw")
   preds_prob_from_raw <- 1.0 / (1.0 + exp(-preds_raw_s3_keyword))
-  expect_equal(preds_prob, preds_prob_from_raw, tolerance = TOLERANCE)
+  expect_equal(preds_prob, preds_prob_from_raw, tolerance = .LGB_NUMERIC_TOLERANCE)
   accuracy <- sum(as.integer(preds_prob_from_raw > 0.5) == y) / length(y)
   expect_equal(accuracy, 1.0)

@@ -269,7 +263,7 @@ test_that("predict() params should override keyword argument for leaf-index pred
       , num_threads = .LGB_MAX_THREADS
     )
     , nrounds = 10L
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
   )

   # check that predictions really look like leaf index predictions

@@ -323,7 +317,7 @@ test_that("predict() params should override keyword argument for feature contrib
       , num_threads = .LGB_MAX_THREADS
     )
     , nrounds = 10L
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
   )

   # check that predictions really look like feature contributions

@@ -431,7 +425,7 @@ test_that("predict() keeps row names from data (regression)", {
     data = dtrain
     , obj = "regression"
     , nrounds = 5L
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
     , params = list(min_data_in_leaf = 1L, num_threads = .LGB_MAX_THREADS)
   )
   .check_all_row_name_expectations(bst, X)

@@ -447,7 +441,7 @@ test_that("predict() keeps row names from data (binary classification)", {
     data = dtrain
     , obj = "binary"
     , nrounds = 5L
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
     , params = list(num_threads = .LGB_MAX_THREADS)
   )
   .check_all_row_name_expectations(bst, X)

@@ -464,7 +458,7 @@ test_that("predict() keeps row names from data (multi-class classification)", {
     , obj = "multiclass"
     , params = list(num_class = 3L, num_threads = .LGB_MAX_THREADS)
     , nrounds = 5L
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
   )
   .check_all_row_name_expectations(bst, X)
 })

@@ -485,7 +479,7 @@ test_that("predictions for regression and binary classification are returned as
     data = dtrain
     , obj = "regression"
     , nrounds = 5L
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
     , params = list(min_data_in_leaf = 1L, num_threads = .LGB_MAX_THREADS)
   )
   pred <- predict(model, X)

@@ -503,7 +497,7 @@ test_that("predictions for regression and binary classification are returned as
     data = dtrain
     , obj = "binary"
     , nrounds = 5L
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
     , params = list(num_threads = .LGB_MAX_THREADS)
   )
   pred <- predict(model, X)

@@ -523,7 +517,7 @@ test_that("predictions for multiclass classification are returned as matrix", {
     data = dtrain
     , obj = "multiclass"
     , nrounds = 5L
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
     , params = list(num_class = 3L, num_threads = .LGB_MAX_THREADS)
   )
   pred <- predict(model, X)

@@ -668,7 +662,7 @@ test_that("predict type='class' returns predicted class for classification objec
     data = dtrain
     , obj = "binary"
     , nrounds = 5L
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
     , params = list(num_threads = .LGB_MAX_THREADS)
   )
   pred <- predict(bst, X, type = "class")

@@ -682,7 +676,7 @@ test_that("predict type='class' returns predicted class for classification objec
     data = dtrain
     , obj = "multiclass"
     , nrounds = 5L
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
     , params = list(num_class = 3L, num_threads = .LGB_MAX_THREADS)
   )
   pred <- predict(model, X, type = "class")

@@ -698,7 +692,7 @@ test_that("predict type='class' returns values in the target's range for regress
     data = dtrain
     , obj = "regression"
     , nrounds = 5L
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
     , params = list(num_threads = .LGB_MAX_THREADS)
   )
   pred <- predict(bst, X, type = "class")
R-package/tests/testthat/test_basic.R

(This diff — +115 −127, the largest in the commit — is collapsed in the page view and its contents are not shown.)
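The collapsed contents are not recoverable from this page, but the commit summary indicates test_basic.R receives the same substitutions as the files shown here: local VERBOSITY/TOLERANCE definitions removed, references switched to the shared constants. A representative before/after sketch (illustrative only, not the literal hunks; the training call uses made-up data and assumes the helper constants are in scope, as they are under testthat):

# Before: the test file defined its own copies of the constants.
VERBOSITY <- as.integer(Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1"))
TOLERANCE <- 1e-6

# After: calls reference the shared .LGB_-prefixed constants from helper.R.
library(lightgbm)
bst <- lightgbm(
  data = as.matrix(mtcars[, -1L])  # made-up example data, not from test_basic.R
  , label = mtcars$mpg
  , params = list(objective = "regression", num_threads = .LGB_MAX_THREADS)
  , verbose = .LGB_VERBOSITY
  , nrounds = 3L
)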
R-package/tests/testthat/test_custom_objective.R

-VERBOSITY <- as.integer(Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1"))

 data(agaricus.train, package = "lightgbm")
 data(agaricus.test, package = "lightgbm")
 dtrain <- lgb.Dataset(agaricus.train$data, label = agaricus.train$label)
 dtest <- lgb.Dataset(agaricus.test$data, label = agaricus.test$label)
 watchlist <- list(eval = dtest, train = dtrain)

-TOLERANCE <- 1e-6

 logregobj <- function(preds, dtrain) {
   labels <- get_field(dtrain, "label")
   preds <- 1.0 / (1.0 + exp(-preds))

@@ -38,7 +32,7 @@ param <- list(
   , learning_rate = 1.0
   , objective = logregobj
   , metric = "auc"
-  , verbose = VERBOSITY
+  , verbose = .LGB_VERBOSITY
   , num_threads = .LGB_MAX_THREADS
 )
 num_round <- 10L

@@ -54,7 +48,7 @@ test_that("using a custom objective, custom eval, and no other metrics works", {
     params = list(
       num_leaves = 8L
       , learning_rate = 1.0
-      , verbose = VERBOSITY
+      , verbose = .LGB_VERBOSITY
       , num_threads = .LGB_MAX_THREADS
     )
     , data = dtrain

@@ -65,11 +59,11 @@ test_that("using a custom objective, custom eval, and no other metrics works", {
   )
   expect_false(is.null(bst$record_evals))
   expect_equal(bst$best_iter, 4L)
-  expect_true(abs(bst$best_score - 0.000621) < TOLERANCE)
+  expect_true(abs(bst$best_score - 0.000621) < .LGB_NUMERIC_TOLERANCE)
   eval_results <- bst$eval_valid(feval = evalerror)[[1L]]
   expect_true(eval_results[["data_name"]] == "eval")
-  expect_true(abs(eval_results[["value"]] - 0.0006207325) < TOLERANCE)
+  expect_true(abs(eval_results[["value"]] - 0.0006207325) < .LGB_NUMERIC_TOLERANCE)
   expect_true(eval_results[["name"]] == "error")
   expect_false(eval_results[["higher_better"]])
 })

@@ -81,7 +75,7 @@ test_that("using a custom objective that returns wrong shape grad or hess raises
   bad_hess <- function(preds, dtrain) {
     return(list(grad = rep(1.0, length(preds)), hess = numeric(0L)))
   }
-  params <- list(num_leaves = 3L, verbose = VERBOSITY)
+  params <- list(num_leaves = 3L, verbose = .LGB_VERBOSITY)
   expect_error({
     lgb.train(params = params, data = dtrain, obj = bad_grad)
   }, sprintf("Expected custom objective function to return grad with length %d, got 0.", nrow(dtrain)))
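The first hunk above shows only the opening lines of logregobj(); the elided remainder computes the gradient and hessian of the logistic loss from the sigmoid-transformed scores. A reconstruction of the standard form of that objective (a sketch consistent with the visible lines, not copied verbatim from the file):

# Custom objective for lgb.train(): given raw scores and the training Dataset,
# return the first and second derivatives of the logistic loss w.r.t. the scores.
logregobj <- function(preds, dtrain) {
  labels <- get_field(dtrain, "label")
  preds <- 1.0 / (1.0 + exp(-preds))  # raw score -> probability
  grad <- preds - labels              # gradient of the log loss
  hess <- preds * (1.0 - preds)       # hessian of the log loss
  return(list(grad = grad, hess = hess))
}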
R-package/tests/testthat/test_dataset.R

-VERBOSITY <- as.integer(Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1"))

 data(agaricus.train, package = "lightgbm")
 train_data <- agaricus.train$data[seq_len(1000L), ]
 train_label <- agaricus.train$label[seq_len(1000L)]

@@ -16,7 +12,7 @@ test_that("lgb.Dataset: basic construction, saving, loading", {
     test_data
     , label = test_label
     , params = list(
-      verbose = VERBOSITY
+      verbose = .LGB_VERBOSITY
     )
   )
   # from dense matrix

@@ -30,7 +26,7 @@ test_that("lgb.Dataset: basic construction, saving, loading", {
   dtest3 <- lgb.Dataset(
     tmp_file
     , params = list(
-      verbose = VERBOSITY
+      verbose = .LGB_VERBOSITY
     )
   )
   lgb.Dataset.construct(dtest3)

@@ -376,7 +372,7 @@ test_that("lgb.Dataset: should be able to run lgb.train() immediately after usin
     data = test_data
     , label = test_label
     , params = list(
-      verbose = VERBOSITY
+      verbose = .LGB_VERBOSITY
     )
   )
   tmp_file <- tempfile(pattern = "lgb.Dataset_")

@@ -393,7 +389,7 @@ test_that("lgb.Dataset: should be able to run lgb.train() immediately after usin
     , metric = "binary_logloss"
     , num_leaves = 5L
     , learning_rate = 1.0
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
     , num_threads = .LGB_MAX_THREADS
   )

@@ -411,7 +407,7 @@ test_that("lgb.Dataset: should be able to run lgb.cv() immediately after using l
     data = test_data
     , label = test_label
     , params = list(
-      verbosity = VERBOSITY
+      verbosity = .LGB_VERBOSITY
     )
   )
   tmp_file <- tempfile(pattern = "lgb.Dataset_")

@@ -429,7 +425,7 @@ test_that("lgb.Dataset: should be able to run lgb.cv() immediately after using l
     , num_leaves = 5L
     , learning_rate = 1.0
     , num_iterations = 5L
-    , verbosity = VERBOSITY
+    , verbosity = .LGB_VERBOSITY
     , num_threads = .LGB_MAX_THREADS
   )

@@ -475,7 +471,7 @@ test_that("lgb.Dataset: should be able to create a Dataset from a text file with
     data = train_file
     , params = list(
       header = TRUE
-      , verbosity = VERBOSITY
+      , verbosity = .LGB_VERBOSITY
     )
   )
   dtrain$construct()

@@ -499,7 +495,7 @@ test_that("lgb.Dataset: should be able to create a Dataset from a text file with
     data = train_file
     , params = list(
       header = FALSE
-      , verbosity = VERBOSITY
+      , verbosity = .LGB_VERBOSITY
     )
   )
   dtrain$construct()
R-package/tests/testthat/test_learning_to_rank.R

-VERBOSITY <- as.integer(Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1"))
-
-# numerical tolerance to use when checking metric values
-TOLERANCE <- 1e-06
-
-ON_32_BIT_WINDOWS <- .Platform$OS.type == "windows" && .Machine$sizeof.pointer != 8L

 test_that("learning-to-rank with lgb.train() works as expected", {
   set.seed(708L)
   data(agaricus.train, package = "lightgbm")

@@ -26,7 +17,7 @@ test_that("learning-to-rank with lgb.train() works as expected", {
     , ndcg_at = ndcg_at
     , lambdarank_truncation_level = 3L
     , learning_rate = 0.001
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
     , num_threads = .LGB_MAX_THREADS
   )
   model <- lgb.train(

@@ -60,15 +51,15 @@ test_that("learning-to-rank with lgb.train() works as expected", {
     , eval_names
   )
   expect_equal(eval_results[[1L]][["value"]], 0.775)
-  if (!ON_32_BIT_WINDOWS) {
-    expect_true(abs(eval_results[[2L]][["value"]] - 0.745986) < TOLERANCE)
-    expect_true(abs(eval_results[[3L]][["value"]] - 0.7351959) < TOLERANCE)
+  if (!.LGB_ON_32_BIT_WINDOWS) {
+    expect_true(abs(eval_results[[2L]][["value"]] - 0.745986) < .LGB_NUMERIC_TOLERANCE)
+    expect_true(abs(eval_results[[3L]][["value"]] - 0.7351959) < .LGB_NUMERIC_TOLERANCE)
   }
 })

 test_that("learning-to-rank with lgb.cv() works as expected", {
   testthat::skip_if(
-    ON_32_BIT_WINDOWS
+    .LGB_ON_32_BIT_WINDOWS
     , message = "Skipping on 32-bit Windows"
   )
   set.seed(708L)

@@ -91,7 +82,7 @@ test_that("learning-to-rank with lgb.cv() works as expected", {
     , label_gain = "0,1,3"
     , min_data = 1L
     , learning_rate = 0.01
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
     , num_threads = .LGB_MAX_THREADS
   )
   nfold <- 4L

@@ -115,7 +106,7 @@ test_that("learning-to-rank with lgb.cv() works as expected", {
   best_score <- cv_bst$best_score
   expect_true(best_iter > 0L && best_iter <= nrounds)
   expect_true(best_score > 0.0 && best_score < 1.0)
-  expect_true(abs(best_score - 0.75) < TOLERANCE)
+  expect_true(abs(best_score - 0.75) < .LGB_NUMERIC_TOLERANCE)

   # best_score should be set for the first metric
   first_metric <- eval_names[[1L]]

@@ -138,19 +129,19 @@ test_that("learning-to-rank with lgb.cv() works as expected", {
   # first and last value of each metric should be as expected
   ndcg1_values <- c(0.675, 0.725, 0.65, 0.725, 0.75, 0.725, 0.75, 0.725, 0.75, 0.75)
-  expect_true(all(abs(unlist(eval_results[["ndcg@1"]][["eval"]]) - ndcg1_values) < TOLERANCE))
+  expect_true(all(abs(unlist(eval_results[["ndcg@1"]][["eval"]]) - ndcg1_values) < .LGB_NUMERIC_TOLERANCE))

   ndcg2_values <- c(
     0.6556574, 0.6669721, 0.6306574, 0.6476294, 0.6629581
     , 0.6476294, 0.6629581, 0.6379581, 0.7113147, 0.6823008
   )
-  expect_true(all(abs(unlist(eval_results[["ndcg@2"]][["eval"]]) - ndcg2_values) < TOLERANCE))
+  expect_true(all(abs(unlist(eval_results[["ndcg@2"]][["eval"]]) - ndcg2_values) < .LGB_NUMERIC_TOLERANCE))

   ndcg3_values <- c(
     0.6484639, 0.6571238, 0.6469279, 0.6540516, 0.6481857
     , 0.6481857, 0.6481857, 0.6466496, 0.7027939, 0.6629898
   )
-  expect_true(all(abs(unlist(eval_results[["ndcg@3"]][["eval"]]) - ndcg3_values) < TOLERANCE))
+  expect_true(all(abs(unlist(eval_results[["ndcg@3"]][["eval"]]) - ndcg3_values) < .LGB_NUMERIC_TOLERANCE))

   # check details of each booster
   for (bst in cv_bst$boosters) {
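The skip_if() change above is one place where the diff touches control flow rather than just a parameter value: the shared .LGB_ON_32_BIT_WINDOWS flag now decides whether the whole lgb.cv() ranking test runs. A minimal sketch of how testthat::skip_if() behaves with such a flag (hypothetical test; the constant is re-derived locally so the sketch is self-contained):

library(testthat)

# Stand-in for the helper constant; on a 64-bit machine this is FALSE.
.LGB_ON_32_BIT_WINDOWS <- (.Platform$OS.type == "windows") && (.Machine$sizeof.pointer != 8L)

test_that("a platform-gated expectation", {
  # If the condition is TRUE, testthat marks the test as skipped and stops
  # executing this block; otherwise execution continues normally.
  testthat::skip_if(.LGB_ON_32_BIT_WINDOWS, message = "Skipping on 32-bit Windows")
  expect_true(.Machine$sizeof.pointer == 8L)
})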
R-package/tests/testthat/test_lgb.Booster.R

-VERBOSITY <- as.integer(Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1"))
-
-ON_WINDOWS <- .Platform$OS.type == "windows"
-
-TOLERANCE <- 1e-6

 test_that("Booster$finalize() should not fail", {
   X <- as.matrix(as.integer(iris[, "Species"]), ncol = 1L)
   y <- iris[["Sepal.Length"]]

@@ -15,7 +8,7 @@ test_that("Booster$finalize() should not fail", {
       objective = "regression"
       , num_threads = .LGB_MAX_THREADS
     )
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
     , nrounds = 3L
   )
   expect_true(lgb.is.Booster(bst))

@@ -66,7 +59,7 @@ test_that("lgb.get.eval.result() should throw an informative error for incorrect
     , metric = "l2"
     , min_data = 1L
     , learning_rate = 1.0
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
     , num_threads = .LGB_MAX_THREADS
   )
   , data = dtrain

@@ -101,7 +94,7 @@ test_that("lgb.get.eval.result() should throw an informative error for incorrect
     , metric = "l2"
     , min_data = 1L
     , learning_rate = 1.0
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
   )
   , data = dtrain
   , nrounds = 5L

@@ -135,7 +128,7 @@ test_that("lgb.load() gives the expected error messages given different incorrec
     objective = "binary"
     , num_leaves = 4L
     , learning_rate = 1.0
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
   )
   , nrounds = 2L
 )

@@ -186,7 +179,7 @@ test_that("Loading a Booster from a text file works", {
     , metric = c("mape", "average_precision")
     , learning_rate = 1.0
     , objective = "binary"
-    , verbosity = VERBOSITY
+    , verbosity = .LGB_VERBOSITY
     , num_threads = .LGB_MAX_THREADS
   )
   bst <- lightgbm(

@@ -237,7 +230,7 @@ test_that("boosters with linear models at leaves can be written to text file and
     data = dtrain
     , nrounds = 10L
     , params = params
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
   )
   expect_true(lgb.is.Booster(bst))

@@ -271,7 +264,7 @@ test_that("Loading a Booster from a string works", {
     num_leaves = 4L
     , learning_rate = 1.0
     , objective = "binary"
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
     , num_threads = .LGB_MAX_THREADS
   )
   , nrounds = 2L

@@ -307,7 +300,7 @@ test_that("Saving a large model to string should work", {
     , num_threads = .LGB_MAX_THREADS
   )
   , nrounds = 500L
-  , verbose = VERBOSITY
+  , verbose = .LGB_VERBOSITY
 )
 pred <- predict(bst, train$data)

@@ -351,7 +344,7 @@ test_that("Saving a large model to JSON should work", {
     , num_threads = .LGB_MAX_THREADS
   )
   , nrounds = 200L
-  , verbose = VERBOSITY
+  , verbose = .LGB_VERBOSITY
 )
 model_json <- bst$dump_model()

@@ -378,7 +371,7 @@ test_that("If a string and a file are both passed to lgb.load() the file is used
     num_leaves = 4L
     , learning_rate = 1.0
     , objective = "binary"
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
     , num_threads = .LGB_MAX_THREADS
   )
   , nrounds = 2L

@@ -413,7 +406,7 @@ test_that("Creating a Booster from a Dataset should work", {
   bst <- Booster$new(
     params = list(
       objective = "binary"
-      , verbose = VERBOSITY
+      , verbose = .LGB_VERBOSITY
       , num_threads = .LGB_MAX_THREADS
     ),
     train_set = dtrain

@@ -435,7 +428,7 @@ test_that("Creating a Booster from a Dataset with an existing predictor should w
     num_leaves = 4L
     , learning_rate = 1.0
     , objective = "binary"
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
     , num_threads = .LGB_MAX_THREADS
   )
   , nrounds = nrounds

@@ -449,7 +442,7 @@ test_that("Creating a Booster from a Dataset with an existing predictor should w
   bst_from_ds <- Booster$new(
     train_set = dtest
     , params = list(
-      verbose = VERBOSITY
+      verbose = .LGB_VERBOSITY
       , num_threads = .LGB_MAX_THREADS
     )
   )

@@ -473,7 +466,7 @@ test_that("Booster$eval() should work on a Dataset stored in a binary file", {
     objective = "regression"
     , metric = "l2"
     , num_leaves = 4L
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
     , num_threads = .LGB_MAX_THREADS
   )
   , data = dtrain

@@ -504,14 +497,14 @@ test_that("Booster$eval() should work on a Dataset stored in a binary file", {
   eval_from_file <- bst$eval(
     data = lgb.Dataset(
       data = test_file
-      , params = list(verbose = VERBOSITY, num_threads = .LGB_MAX_THREADS)
+      , params = list(verbose = .LGB_VERBOSITY, num_threads = .LGB_MAX_THREADS)
     )$construct()
     , name = "test"
   )

-  expect_true(abs(eval_in_mem[[1L]][["value"]] - 0.1744423) < TOLERANCE)
+  expect_true(abs(eval_in_mem[[1L]][["value"]] - 0.1744423) < .LGB_NUMERIC_TOLERANCE)

   # refer to https://github.com/microsoft/LightGBM/issues/4680
-  if (isTRUE(ON_WINDOWS)) {
+  if (isTRUE(.LGB_ON_WINDOWS)) {
     expect_equal(eval_in_mem, eval_from_file)
   } else {
     expect_identical(eval_in_mem, eval_from_file)

@@ -532,7 +525,7 @@ test_that("Booster$rollback_one_iter() should work as expected", {
     num_leaves = 4L
     , learning_rate = 1.0
     , objective = "binary"
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
     , num_threads = .LGB_MAX_THREADS
   )
   , nrounds = nrounds

@@ -567,7 +560,7 @@ test_that("Booster$update() passing a train_set works as expected", {
     num_leaves = 4L
     , learning_rate = 1.0
     , objective = "binary"
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
     , num_threads = .LGB_MAX_THREADS
   )
   , nrounds = nrounds

@@ -578,7 +571,7 @@ test_that("Booster$update() passing a train_set works as expected", {
   train_set = Dataset$new(
     data = agaricus.train$data
     , label = agaricus.train$label
-    , params = list(verbose = VERBOSITY)
+    , params = list(verbose = .LGB_VERBOSITY)
   )
 )
 expect_true(lgb.is.Booster(bst))

@@ -592,7 +585,7 @@ test_that("Booster$update() passing a train_set works as expected", {
     num_leaves = 4L
     , learning_rate = 1.0
     , objective = "binary"
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
     , num_threads = .LGB_MAX_THREADS
   )
   , nrounds = nrounds + 1L

@@ -618,7 +611,7 @@ test_that("Booster$update() throws an informative error if you provide a non-Dat
     num_leaves = 4L
     , learning_rate = 1.0
     , objective = "binary"
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
     , num_threads = .LGB_MAX_THREADS
   )
   , nrounds = nrounds

@@ -646,7 +639,7 @@ test_that("Booster should store parameters and Booster$reset_parameter() should
     , metric = c("multi_logloss", "multi_error")
     , boosting = "gbdt"
     , num_class = 5L
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
     , num_threads = .LGB_MAX_THREADS
   )
   bst <- Booster$new(

@@ -674,7 +667,7 @@ test_that("Booster$params should include dataset params, before and after Booste
     objective = "binary"
     , max_depth = 4L
     , bagging_fraction = 0.8
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
     , num_threads = .LGB_MAX_THREADS
   )
   bst <- Booster$new(

@@ -687,7 +680,7 @@ test_that("Booster$params should include dataset params, before and after Booste
     objective = "binary"
     , max_depth = 4L
     , bagging_fraction = 0.8
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
     , num_threads = .LGB_MAX_THREADS
     , max_bin = 17L
   )

@@ -699,7 +692,7 @@ test_that("Booster$params should include dataset params, before and after Booste
     objective = "binary"
     , max_depth = 4L
     , bagging_fraction = 0.9
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
     , num_threads = .LGB_MAX_THREADS
     , max_bin = 17L
   )

@@ -718,7 +711,7 @@ test_that("Saving a model with different feature importance types works", {
     num_leaves = 4L
     , learning_rate = 1.0
     , objective = "binary"
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
     , num_threads = .LGB_MAX_THREADS
   )
   , nrounds = 2L

@@ -774,7 +767,7 @@ test_that("Saving a model with unknown importance type fails", {
     num_leaves = 4L
     , learning_rate = 1.0
     , objective = "binary"
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
     , num_threads = .LGB_MAX_THREADS
   )
   , nrounds = 2L

@@ -815,7 +808,7 @@ test_that("all parameters are stored correctly with save_model_to_string()", {
   )
   , data = dtrain
   , nrounds = nrounds
-  , verbose = VERBOSITY
+  , verbose = .LGB_VERBOSITY
 )
 model_str <- bst$save_model_to_string()

@@ -832,7 +825,7 @@ test_that("all parameters are stored correctly with save_model_to_string()", {
   expect_equal(sum(params_in_file == "[objective: regression]"), 1L)
   expect_equal(sum(startsWith(params_in_file, "[verbosity:")), 1L)
-  expect_equal(sum(params_in_file == sprintf("[verbosity: %i]", VERBOSITY)), 1L)
+  expect_equal(sum(params_in_file == sprintf("[verbosity: %i]", .LGB_VERBOSITY)), 1L)

   # early stopping should be off by default
   expect_equal(sum(startsWith(params_in_file, "[early_stopping_round:")), 1L)

@@ -879,7 +872,7 @@ test_that("early_stopping, num_iterations are stored correctly in model string e
   , valids = list(
     "random_valid" = dvalid
   )
-  , verbose = VERBOSITY
+  , verbose = .LGB_VERBOSITY
 )
 model_str <- bst$save_model_to_string()

@@ -911,7 +904,7 @@ test_that("Booster: method calls Booster with a null handle should raise an info
     , num_threads = .LGB_MAX_THREADS
   )
   , data = dtrain
-  , verbose = VERBOSITY
+  , verbose = .LGB_VERBOSITY
   , nrounds = 5L
   , valids = list(
     train = dtrain

@@ -987,7 +980,7 @@ test_that("Booster$new() using a Dataset with a null handle should raise an info
   bst <- Booster$new(
     train_set = dtrain
     , params = list(
-      verbose = VERBOSITY
+      verbose = .LGB_VERBOSITY
     )
   )
 }, regexp = "Attempting to create a Dataset without any raw data")

@@ -1098,7 +1091,7 @@ test_that("lgb.cv() correctly handles passing through params to the model file",
   , n_iter = n_iter
   , early_stopping_round = early_stopping_round
   , n_iter_no_change = n_iter_no_change
-  , verbose = VERBOSITY
+  , verbose = .LGB_VERBOSITY
   , num_threads = .LGB_MAX_THREADS
 )

@@ -1108,7 +1101,7 @@ test_that("lgb.cv() correctly handles passing through params to the model file",
   , nrounds = nrounds_kwarg
   , early_stopping_rounds = early_stopping_round_kwarg
   , nfold = 3L
-  , verbose = VERBOSITY
+  , verbose = .LGB_VERBOSITY
 )
 for (bst in cv_bst$boosters) {

@@ -1143,7 +1136,7 @@ test_that("params (including dataset params) should be stored in .rds file for B
     objective = "binary"
     , max_depth = 4L
     , bagging_fraction = 0.8
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
     , num_threads = .LGB_MAX_THREADS
   )
   bst <- Booster$new(

@@ -1160,7 +1153,7 @@ test_that("params (including dataset params) should be stored in .rds file for B
     objective = "binary"
     , max_depth = 4L
     , bagging_fraction = 0.8
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
     , num_threads = .LGB_MAX_THREADS
     , max_bin = 17L
   )

@@ -1180,7 +1173,7 @@ test_that("params (including dataset params) should be stored in .rds file for B
     objective = "binary"
     , max_depth = 4L
     , bagging_fraction = 0.8
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
     , num_threads = .LGB_MAX_THREADS
   )
   bst <- Booster$new(

@@ -1197,7 +1190,7 @@ test_that("params (including dataset params) should be stored in .rds file for B
     objective = "binary"
     , max_depth = 4L
     , bagging_fraction = 0.8
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
     , num_threads = .LGB_MAX_THREADS
     , max_bin = 17L
   )

@@ -1212,7 +1205,7 @@ test_that("Handle is automatically restored when calling predict", {
   , nrounds = 5L
   , obj = "binary"
   , params = list(
-    verbose = VERBOSITY
+    verbose = .LGB_VERBOSITY
   )
   , num_threads = .LGB_MAX_THREADS
 )

@@ -1236,7 +1229,7 @@ test_that("boosters with linear models at leaves work with saveRDS.lgb.Booster a
   params <- list(
     objective = "regression"
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
     , metric = "mse"
     , seed = 0L
     , num_leaves = 2L

@@ -1276,7 +1269,7 @@ test_that("boosters with linear models at leaves can be written to RDS and re-lo
   params <- list(
     objective = "regression"
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
     , metric = "mse"
     , seed = 0L
     , num_leaves = 2L

@@ -1386,7 +1379,7 @@ test_that("Booster's print, show, and summary work correctly", {
       min_data_in_bin = 1L
     )
   )
-  , verbose = VERBOSITY
+  , verbose = .LGB_VERBOSITY
   , nrounds = 5L
 )
 .check_methods_work(model)

@@ -1398,7 +1391,7 @@ test_that("Booster's print, show, and summary work correctly", {
     as.matrix(iris[, -5L])
     , label = as.numeric(factor(iris$Species)) - 1.0
   )
-  , verbose = VERBOSITY
+  , verbose = .LGB_VERBOSITY
   , nrounds = 5L
 )
 .check_methods_work(model)

@@ -1431,7 +1424,7 @@ test_that("Booster's print, show, and summary work correctly", {
   )
   , obj = .logregobj
   , eval = .evalerror
-  , verbose = VERBOSITY
+  , verbose = .LGB_VERBOSITY
   , nrounds = 5L
   , params = list(num_threads = .LGB_MAX_THREADS)
 )

@@ -1454,7 +1447,7 @@ test_that("LGBM_BoosterGetNumFeature_R returns correct outputs", {
       min_data_in_bin = 1L
     )
   )
-  , verbose = VERBOSITY
+  , verbose = .LGB_VERBOSITY
   , nrounds = 5L
 )
 ncols <- .Call(LGBM_BoosterGetNumFeature_R, model$.__enclos_env__$private$handle)

@@ -1467,7 +1460,7 @@ test_that("LGBM_BoosterGetNumFeature_R returns correct outputs", {
     as.matrix(iris[, -5L])
     , label = as.numeric(factor(iris$Species)) - 1.0
   )
-  , verbose = VERBOSITY
+  , verbose = .LGB_VERBOSITY
   , nrounds = 5L
 )
 ncols <- .Call(LGBM_BoosterGetNumFeature_R, model$.__enclos_env__$private$handle)
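One detail in the Booster$eval() hunk above: on Windows the comparison uses expect_equal(), which tolerates tiny numeric differences, while elsewhere expect_identical() demands exact equality (see the linked issue #4680). The distinction in isolation:

library(testthat)

x <- 0.1 + 0.2
y <- 0.3

expect_equal(x, y)            # passes: equal within testthat's numeric tolerance
try(expect_identical(x, y))   # fails: 0.1 + 0.2 differs from 0.3 in the last bits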
R-package/tests/testthat/test_lgb.interprete.R

-VERBOSITY <- as.integer(Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1"))

 .sigmoid <- function(x) {
   1.0 / (1.0 + exp(-x))
 }

@@ -30,7 +26,7 @@ test_that("lgb.intereprete works as expected for binary classification", {
     , max_depth = -1L
     , min_data_in_leaf = 1L
     , min_sum_hessian_in_leaf = 1.0
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
     , num_threads = .LGB_MAX_THREADS
   )
   model <- lgb.train(

@@ -83,7 +79,7 @@ test_that("lgb.intereprete works as expected for multiclass classification", {
     , num_class = 3L
     , learning_rate = 0.00001
     , min_data = 1L
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
     , num_threads = .LGB_MAX_THREADS
   )
   model <- lgb.train(
R-package/tests/testthat/test_lgb.plot.importance.R

-VERBOSITY <- as.integer(Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1"))

 test_that("lgb.plot.importance() should run without error for well-formed inputs", {
   data(agaricus.train, package = "lightgbm")
   train <- agaricus.train

@@ -13,7 +9,7 @@ test_that("lgb.plot.importance() should run without error for well-formed inputs
     , max_depth = -1L
     , min_data_in_leaf = 1L
     , min_sum_hessian_in_leaf = 1.0
-    , verbosity = VERBOSITY
+    , verbosity = .LGB_VERBOSITY
     , num_threads = .LGB_MAX_THREADS
   )
   model <- lgb.train(params, dtrain, 3L)
R-package/tests/testthat/test_lgb.plot.interpretation.R

-VERBOSITY <- as.integer(Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1"))

 .sigmoid <- function(x) {
   1.0 / (1.0 + exp(-x))
 }

@@ -30,7 +26,7 @@ test_that("lgb.plot.interepretation works as expected for binary classification"
     , max_depth = -1L
     , min_data_in_leaf = 1L
     , min_sum_hessian_in_leaf = 1.0
-    , verbosity = VERBOSITY
+    , verbosity = .LGB_VERBOSITY
     , num_threads = .LGB_MAX_THREADS
   )
   model <- lgb.train(

@@ -87,7 +83,7 @@ test_that("lgb.plot.interepretation works as expected for multiclass classificat
     params = params
     , data = dtrain
     , nrounds = 3L
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
   )
   num_trees <- 5L
   tree_interpretation <- lgb.interprete(
R-package/tests/testthat/test_weighted_loss.R

-VERBOSITY <- as.integer(Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1"))

 test_that("Gamma regression reacts on 'weight'", {
   n <- 100L
   set.seed(87L)

@@ -17,7 +13,7 @@ test_that("Gamma regression reacts on 'weight'", {
     params = params
     , data = dtrain
     , nrounds = 4L
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
   )
   pred_unweighted <- predict(bst, X_pred)

@@ -31,7 +27,7 @@ test_that("Gamma regression reacts on 'weight'", {
     params = params
     , data = dtrain
     , nrounds = 4L
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
   )
   pred_weighted_1 <- predict(bst, X_pred)

@@ -45,7 +41,7 @@ test_that("Gamma regression reacts on 'weight'", {
     params = params
     , data = dtrain
     , nrounds = 4L
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
   )
   pred_weighted_2 <- predict(bst, X_pred)

@@ -59,7 +55,7 @@ test_that("Gamma regression reacts on 'weight'", {
     params = params
     , data = dtrain
     , nrounds = 4L
-    , verbose = VERBOSITY
+    , verbose = .LGB_VERBOSITY
   )
   pred_weighted <- predict(bst, X_pred)