Unverified Commit bd2e949e authored by James Lamb's avatar James Lamb Committed by GitHub
Browse files

[R-package] reduce verbosity in some unit tests (#4879)

* [R-package] reduce verbosity in some unit tests

* simplify

* Update R-package/tests/testthat/test_lgb.plot.interpretation.R
parent 45230207
...@@ -16,6 +16,8 @@ ...@@ -16,6 +16,8 @@
- [Installing from a Pre-compiled lib_lightgbm](#lib_lightgbm)
* [Examples](#examples)
* [Testing](#testing)
- [Running the Tests](#running-the-tests)
- [Code Coverage](#code-coverage)
* [Preparing a CRAN Package](#preparing-a-cran-package)
* [External Repositories](#external-unofficial-repositories)
* [Known Issues](#known-issues)
...@@ -234,6 +236,29 @@ Testing ...@@ -234,6 +236,29 @@ Testing
The R package's unit tests are run automatically on every commit, via integrations like [GitHub Actions](https://github.com/microsoft/LightGBM/actions). Adding new tests in `R-package/tests/testthat` is a valuable way to improve the reliability of the R package.
### Running the Tests
While developing the R package, run the code below to execute the unit tests.
```shell
sh build-cran-package.sh \
--no-build-vignettes
R CMD INSTALL --with-keep.source lightgbm*.tar.gz
cd R-package/tests
Rscript testthat.R
```
To run the tests with more verbose logs, set environment variable `LIGHTGBM_TEST_VERBOSITY` to a valid value for parameter [`verbosity`](https://lightgbm.readthedocs.io/en/latest/Parameters.html#verbosity).
```shell
export LIGHTGBM_TEST_VERBOSITY=1
cd R-package/tests
Rscript testthat.R
```
### Code Coverage
When adding tests, you may want to use test coverage to identify untested areas and to check if the tests you've added are covering all branches of the intended code.
The example below shows how to generate code coverage for the R package on a macOS or Linux setup. To adjust for your environment, refer to [the customization step described above](#custom-installation-linux-mac).
......
# Verbosity passed to lgb.train() / lightgbm() calls in these tests.
# Read from environment variable LIGHTGBM_TEST_VERBOSITY; when that
# variable is not set, fall back to "-1" (fatal-only logging).
VERBOSITY <- as.integer(Sys.getenv("LIGHTGBM_TEST_VERBOSITY", unset = "-1"))
context("Predictor") context("Predictor")
test_that("Predictor$finalize() should not fail", { test_that("Predictor$finalize() should not fail", {
...@@ -9,7 +13,7 @@ test_that("Predictor$finalize() should not fail", { ...@@ -9,7 +13,7 @@ test_that("Predictor$finalize() should not fail", {
, params = list( , params = list(
objective = "regression" objective = "regression"
) )
, verbose = -1L , verbose = VERBOSITY
, nrounds = 3L , nrounds = 3L
) )
model_file <- tempfile(fileext = ".model") model_file <- tempfile(fileext = ".model")
...@@ -37,7 +41,7 @@ test_that("predictions do not fail for integer input", { ...@@ -37,7 +41,7 @@ test_that("predictions do not fail for integer input", {
, params = list( , params = list(
objective = "regression" objective = "regression"
) )
, verbose = -1L , verbose = VERBOSITY
, nrounds = 3L , nrounds = 3L
) )
X_double <- X[c(1L, 51L, 101L), , drop = FALSE] X_double <- X[c(1L, 51L, 101L), , drop = FALSE]
...@@ -70,6 +74,7 @@ test_that("start_iteration works correctly", { ...@@ -70,6 +74,7 @@ test_that("start_iteration works correctly", {
num_leaves = 4L num_leaves = 4L
, learning_rate = 0.6 , learning_rate = 0.6
, objective = "binary" , objective = "binary"
, verbosity = VERBOSITY
) )
, nrounds = 50L , nrounds = 50L
, valids = list("test" = dtest) , valids = list("test" = dtest)
......
# Verbosity passed to lgb.train() / lightgbm() calls in these tests.
# Read from environment variable LIGHTGBM_TEST_VERBOSITY; when that
# variable is not set, fall back to "-1" (fatal-only logging).
VERBOSITY <- as.integer(Sys.getenv("LIGHTGBM_TEST_VERBOSITY", unset = "-1"))
context("lgb.interpete") context("lgb.interpete")
.sigmoid <- function(x) { .sigmoid <- function(x) {
...@@ -28,6 +32,7 @@ test_that("lgb.intereprete works as expected for binary classification", { ...@@ -28,6 +32,7 @@ test_that("lgb.intereprete works as expected for binary classification", {
, max_depth = -1L , max_depth = -1L
, min_data_in_leaf = 1L , min_data_in_leaf = 1L
, min_sum_hessian_in_leaf = 1.0 , min_sum_hessian_in_leaf = 1.0
, verbose = VERBOSITY
) )
model <- lgb.train( model <- lgb.train(
params = params params = params
...@@ -79,6 +84,7 @@ test_that("lgb.intereprete works as expected for multiclass classification", { ...@@ -79,6 +84,7 @@ test_that("lgb.intereprete works as expected for multiclass classification", {
, num_class = 3L , num_class = 3L
, learning_rate = 0.00001 , learning_rate = 0.00001
, min_data = 1L , min_data = 1L
, verbose = VERBOSITY
) )
model <- lgb.train( model <- lgb.train(
params = params params = params
......
# Verbosity passed to lgb.train() / lightgbm() calls in these tests.
# Read from environment variable LIGHTGBM_TEST_VERBOSITY; when that
# variable is not set, fall back to "-1" (fatal-only logging).
VERBOSITY <- as.integer(Sys.getenv("LIGHTGBM_TEST_VERBOSITY", unset = "-1"))
context("lgb.plot.importance()") context("lgb.plot.importance()")
test_that("lgb.plot.importance() should run without error for well-formed inputs", { test_that("lgb.plot.importance() should run without error for well-formed inputs", {
...@@ -11,6 +15,7 @@ test_that("lgb.plot.importance() should run without error for well-formed inputs ...@@ -11,6 +15,7 @@ test_that("lgb.plot.importance() should run without error for well-formed inputs
, max_depth = -1L , max_depth = -1L
, min_data_in_leaf = 1L , min_data_in_leaf = 1L
, min_sum_hessian_in_leaf = 1.0 , min_sum_hessian_in_leaf = 1.0
, verbosity = VERBOSITY
) )
model <- lgb.train(params, dtrain, 3L) model <- lgb.train(params, dtrain, 3L)
tree_imp <- lgb.importance(model, percentage = TRUE) tree_imp <- lgb.importance(model, percentage = TRUE)
......
# Verbosity passed to lgb.train() / lightgbm() calls in these tests.
# Read from environment variable LIGHTGBM_TEST_VERBOSITY; when that
# variable is not set, fall back to "-1" (fatal-only logging).
VERBOSITY <- as.integer(Sys.getenv("LIGHTGBM_TEST_VERBOSITY", unset = "-1"))
context("lgb.plot.interpretation") context("lgb.plot.interpretation")
.sigmoid <- function(x) { .sigmoid <- function(x) {
...@@ -28,6 +32,7 @@ test_that("lgb.plot.interepretation works as expected for binary classification" ...@@ -28,6 +32,7 @@ test_that("lgb.plot.interepretation works as expected for binary classification"
, max_depth = -1L , max_depth = -1L
, min_data_in_leaf = 1L , min_data_in_leaf = 1L
, min_sum_hessian_in_leaf = 1.0 , min_sum_hessian_in_leaf = 1.0
, verbosity = VERBOSITY
) )
model <- lgb.train( model <- lgb.train(
params = params params = params
...@@ -82,6 +87,7 @@ test_that("lgb.plot.interepretation works as expected for multiclass classificat ...@@ -82,6 +87,7 @@ test_that("lgb.plot.interepretation works as expected for multiclass classificat
params = params params = params
, data = dtrain , data = dtrain
, nrounds = 3L , nrounds = 3L
, verbose = VERBOSITY
) )
num_trees <- 5L num_trees <- 5L
tree_interpretation <- lgb.interprete( tree_interpretation <- lgb.interprete(
......
# Verbosity passed to lgb.train() / lightgbm() calls in these tests.
# Read from environment variable LIGHTGBM_TEST_VERBOSITY; when that
# variable is not set, fall back to "-1" (fatal-only logging).
VERBOSITY <- as.integer(Sys.getenv("LIGHTGBM_TEST_VERBOSITY", unset = "-1"))
context("lgb.unloader") context("lgb.unloader")
test_that("lgb.unloader works as expected", { test_that("lgb.unloader works as expected", {
...@@ -10,6 +14,7 @@ test_that("lgb.unloader works as expected", { ...@@ -10,6 +14,7 @@ test_that("lgb.unloader works as expected", {
, metric = "l2" , metric = "l2"
, min_data = 1L , min_data = 1L
, learning_rate = 1.0 , learning_rate = 1.0
, verbosity = VERBOSITY
) )
, data = dtrain , data = dtrain
, nrounds = 1L , nrounds = 1L
...@@ -30,6 +35,7 @@ test_that("lgb.unloader finds all boosters and removes them", { ...@@ -30,6 +35,7 @@ test_that("lgb.unloader finds all boosters and removes them", {
, metric = "l2" , metric = "l2"
, min_data = 1L , min_data = 1L
, learning_rate = 1.0 , learning_rate = 1.0
, verbosity = VERBOSITY
) )
, data = dtrain , data = dtrain
, nrounds = 1L , nrounds = 1L
...@@ -40,6 +46,7 @@ test_that("lgb.unloader finds all boosters and removes them", { ...@@ -40,6 +46,7 @@ test_that("lgb.unloader finds all boosters and removes them", {
, metric = "l2" , metric = "l2"
, min_data = 1L , min_data = 1L
, learning_rate = 1.0 , learning_rate = 1.0
, verbosity = VERBOSITY
) )
, data = dtrain , data = dtrain
, nrounds = 1L , nrounds = 1L
......
# Verbosity passed to lgb.train() / lightgbm() calls in these tests.
# Read from environment variable LIGHTGBM_TEST_VERBOSITY; when that
# variable is not set, fall back to "-1" (fatal-only logging).
VERBOSITY <- as.integer(Sys.getenv("LIGHTGBM_TEST_VERBOSITY", unset = "-1"))
context("Case weights are respected") context("Case weights are respected")
test_that("Gamma regression reacts on 'weight'", { test_that("Gamma regression reacts on 'weight'", {
...@@ -15,7 +19,7 @@ test_that("Gamma regression reacts on 'weight'", { ...@@ -15,7 +19,7 @@ test_that("Gamma regression reacts on 'weight'", {
params = params params = params
, data = dtrain , data = dtrain
, nrounds = 4L , nrounds = 4L
, verbose = 0L , verbose = VERBOSITY
) )
pred_unweighted <- predict(bst, X_pred) pred_unweighted <- predict(bst, X_pred)
...@@ -29,7 +33,7 @@ test_that("Gamma regression reacts on 'weight'", { ...@@ -29,7 +33,7 @@ test_that("Gamma regression reacts on 'weight'", {
params = params params = params
, data = dtrain , data = dtrain
, nrounds = 4L , nrounds = 4L
, verbose = 0L , verbose = VERBOSITY
) )
pred_weighted_1 <- predict(bst, X_pred) pred_weighted_1 <- predict(bst, X_pred)
...@@ -43,7 +47,7 @@ test_that("Gamma regression reacts on 'weight'", { ...@@ -43,7 +47,7 @@ test_that("Gamma regression reacts on 'weight'", {
params = params params = params
, data = dtrain , data = dtrain
, nrounds = 4L , nrounds = 4L
, verbose = 0L , verbose = VERBOSITY
) )
pred_weighted_2 <- predict(bst, X_pred) pred_weighted_2 <- predict(bst, X_pred)
...@@ -57,7 +61,7 @@ test_that("Gamma regression reacts on 'weight'", { ...@@ -57,7 +61,7 @@ test_that("Gamma regression reacts on 'weight'", {
params = params params = params
, data = dtrain , data = dtrain
, nrounds = 4L , nrounds = 4L
, verbose = 0L , verbose = VERBOSITY
) )
pred_weighted <- predict(bst, X_pred) pred_weighted <- predict(bst, X_pred)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment