Unverified commit 1684419f, authored by Nick Miller and committed by GitHub
Browse files

[R-package] Add specific error messages to `test_Predictor` and `test_dataset` (#6931)


Co-authored-by: James Lamb <jaylamb20@gmail.com>
parent 618cfafa
......@@ -1018,7 +1018,7 @@ predict.lgb.Booster <- function(object,
...) {
if (!.is_Booster(x = object)) {
stop("predict.lgb.Booster: object should be an ", sQuote("lgb.Booster"))
stop("predict.lgb.Booster: object should be an ", sQuote("lgb.Booster", q = FALSE))
}
additional_params <- list(...)
......@@ -1175,7 +1175,7 @@ lgb.configure_fast_predict <- function(model,
type = "response",
params = list()) {
if (!.is_Booster(x = model)) {
stop("lgb.configure_fast_predict: model should be an ", sQuote("lgb.Booster"))
stop("lgb.configure_fast_predict: model should be an ", sQuote("lgb.Booster", q = FALSE))
}
if (type == "class") {
stop("type='class' is not supported for 'lgb.configure_fast_predict'. Use 'response' instead.")
......@@ -1391,7 +1391,7 @@ lgb.save <- function(
) {
if (!.is_Booster(x = booster)) {
stop("lgb.save: booster should be an ", sQuote("lgb.Booster"))
stop("lgb.save: booster should be an ", sQuote("lgb.Booster", q = FALSE))
}
if (!(is.character(filename) && length(filename) == 1L)) {
......@@ -1455,7 +1455,7 @@ lgb.save <- function(
lgb.dump <- function(booster, num_iteration = NULL, start_iteration = 1L) {
if (!.is_Booster(x = booster)) {
stop("lgb.dump: booster should be an ", sQuote("lgb.Booster"))
stop("lgb.dump: booster should be an ", sQuote("lgb.Booster", q = FALSE))
}
# Return booster at requested iteration
......@@ -1519,7 +1519,7 @@ lgb.dump <- function(booster, num_iteration = NULL, start_iteration = 1L) {
lgb.get.eval.result <- function(booster, data_name, eval_name, iters = NULL, is_err = FALSE) {
if (!.is_Booster(x = booster)) {
stop("lgb.get.eval.result: Can only use ", sQuote("lgb.Booster"), " to get eval result")
stop("lgb.get.eval.result: Can only use ", sQuote("lgb.Booster", q = FALSE), " to get eval result")
}
if (!is.character(data_name) || !is.character(eval_name)) {
......
......@@ -46,10 +46,10 @@ Dataset <- R6::R6Class(
# validate inputs early to avoid unnecessary computation
if (!(is.null(reference) || .is_Dataset(reference))) {
stop("lgb.Dataset: If provided, reference must be a ", sQuote("lgb.Dataset"))
stop("lgb.Dataset: If provided, reference must be a ", sQuote("lgb.Dataset", q = FALSE))
}
if (!(is.null(predictor) || .is_Predictor(predictor))) {
stop("lgb.Dataset: If provided, predictor must be a ", sQuote("lgb.Predictor"))
stop("lgb.Dataset: If provided, predictor must be a ", sQuote("lgb.Predictor", q = FALSE))
}
info <- list()
......@@ -152,7 +152,7 @@ Dataset <- R6::R6Class(
if (sum(is.na(cate_indices)) > 0L) {
stop(
"lgb.Dataset.construct: supplied an unknown feature in categorical_feature: "
, sQuote(private$categorical_feature[is.na(cate_indices)])
, sQuote(private$categorical_feature[is.na(cate_indices)], q = FALSE)
)
}
......@@ -250,7 +250,7 @@ Dataset <- R6::R6Class(
# Unknown data type
stop(
"lgb.Dataset.construct: does not support constructing from "
, sQuote(class(private$raw_data))
, sQuote(class(private$raw_data), q = FALSE)
)
}
......@@ -465,8 +465,8 @@ Dataset <- R6::R6Class(
# Check if attribute key is in the known attribute list
if (!is.character(field_name) || length(field_name) != 1L || !field_name %in% .INFO_KEYS()) {
stop(
"Dataset$get_field(): field_name must one of the following: "
, toString(sQuote(.INFO_KEYS()))
"Dataset$get_field(): field_name must be one of the following: "
, toString(sQuote(.INFO_KEYS(), q = FALSE))
)
}
......@@ -516,8 +516,8 @@ Dataset <- R6::R6Class(
# Check if attribute key is in the known attribute list
if (!is.character(field_name) || length(field_name) != 1L || !field_name %in% .INFO_KEYS()) {
stop(
"Dataset$set_field(): field_name must one of the following: "
, toString(sQuote(.INFO_KEYS()))
"Dataset$set_field(): field_name must be one of the following: "
, toString(sQuote(.INFO_KEYS(), q = FALSE))
)
}
......@@ -1024,7 +1024,7 @@ dimnames.lgb.Dataset <- function(x) {
# Check if invalid element list
if (!identical(class(value), "list") || length(value) != 2L) {
stop("invalid ", sQuote("value"), " given: must be a list of two elements")
stop("invalid ", sQuote("value", q = FALSE), " given: must be a list of two elements")
}
# Check for unknown row names
......@@ -1043,9 +1043,9 @@ dimnames.lgb.Dataset <- function(x) {
if (ncol(x) != length(value[[2L]])) {
stop(
"can't assign "
, sQuote(length(value[[2L]]))
, sQuote(length(value[[2L]]), q = FALSE)
, " colnames to an lgb.Dataset with "
, sQuote(ncol(x))
, sQuote(ncol(x), q = FALSE)
, " columns"
)
}
......
......@@ -442,7 +442,7 @@ Predictor <- R6::R6Class(
} else {
stop("predict: cannot predict on data of class ", sQuote(class(data)))
stop("predict: cannot predict on data of class ", sQuote(class(data), q = FALSE))
}
}
......@@ -451,9 +451,9 @@ Predictor <- R6::R6Class(
if (length(preds) %% num_row != 0L) {
stop(
"predict: prediction length "
, sQuote(length(preds))
, sQuote(length(preds), q = FALSE)
, " is not a multiple of nrows(data): "
, sQuote(num_row)
, sQuote(num_row, q = FALSE)
)
}
......
......@@ -14,7 +14,7 @@
#' @export
lgb.drop_serialized <- function(model) {
if (!.is_Booster(x = model)) {
stop("lgb.drop_serialized: model should be an ", sQuote("lgb.Booster"))
stop("lgb.drop_serialized: model should be an ", sQuote("lgb.Booster", q = FALSE))
}
model$drop_raw()
return(invisible(model))
......
......@@ -14,7 +14,7 @@
#' @export
lgb.make_serializable <- function(model) {
if (!.is_Booster(x = model)) {
stop("lgb.make_serializable: model should be an ", sQuote("lgb.Booster"))
stop("lgb.make_serializable: model should be an ", sQuote("lgb.Booster", q = FALSE))
}
model$save_raw()
return(invisible(model))
......
......@@ -40,7 +40,7 @@
#' @export
lgb.restore_handle <- function(model) {
if (!.is_Booster(x = model)) {
stop("lgb.restore_handle: model should be an ", sQuote("lgb.Booster"))
stop("lgb.restore_handle: model should be an ", sQuote("lgb.Booster", q = FALSE))
}
model$restore_handle()
return(invisible(model))
......
......@@ -95,7 +95,7 @@
if (any(bad)) {
stop(
"unknown feature(s) in interaction_constraints: "
, toString(sQuote(constraint[bad], q = "'"))
, toString(sQuote(constraint[bad], q = FALSE))
)
}
......
......@@ -187,8 +187,14 @@ test_that("Feature contribution predictions do not take non-general CSR or CSC i
, params = list(min_data_in_leaf = 5L, num_threads = .LGB_MAX_THREADS)
)
expect_error(predict(bst, SmatC, type = "contrib"))
expect_error(predict(bst, SmatR, type = "contrib"))
expect_error(
predict(bst, SmatC, type = "contrib")
, regexp = "Predictions on sparse inputs are only allowed for 'dsparseVector', 'dgRMatrix', 'dgCMatrix' - got: dsCMatrix" # nolint: line_length.
)
expect_error(
predict(bst, SmatR, type = "contrib")
, regexp = "Predictions on sparse inputs are only allowed for 'dsparseVector', 'dgRMatrix', 'dgCMatrix' - got: dsRMatrix" # nolint: line_length.
)
})
test_that("predict() params should override keyword argument for raw-score predictions", {
......
......@@ -46,7 +46,11 @@ test_that("lgb.Dataset: get_field & set_field", {
expect_true(length(get_field(dtest, "init_score")) == 0L)
# any other label should error
expect_error(set_field(dtest, "asdf", test_label))
expect_error(
set_field(dtest, "asdf", test_label)
, regexp = "Dataset$set_field(): field_name must be one of the following: 'label', 'weight', 'init_score', 'group'" # nolint: line_length.
, fixed = TRUE
)
})
test_that("lgb.Dataset: slice, dim", {
......@@ -181,7 +185,7 @@ test_that("lgb.Dataset: colnames", {
expect_equal(colnames(dtest), colnames(test_data))
expect_error({
colnames(dtest) <- "asdf"
})
}, regexp = "can't assign '1' colnames to an lgb.Dataset with '126' columns")
new_names <- make.names(seq_len(ncol(test_data)))
expect_silent({
colnames(dtest) <- new_names
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment