"python-package/vscode:/vscode.git/clone" did not exist on "5e592fe6ff2b6eed83dd77942aab8e464768235c"
Unverified commit 4167649c, authored by James Lamb, committed by GitHub
Browse files

[R-package] remove unnecessary comments (#4383)

* [R-package] remove unnecessary comments

* Update R-package/R/lgb.Booster.R
parent 6afd9bf1
......@@ -145,12 +145,10 @@ Booster <- R6::R6Class(
# Add validation data
add_valid = function(data, name) {
# Check if data is lgb.Dataset
if (!lgb.is.Dataset(data)) {
stop("lgb.Booster.add_valid: Can only use lgb.Dataset as validation data")
}
# Check if predictors are identical
if (!identical(data$.__enclos_env__$private$predictor, private$init_predictor)) {
stop(
"lgb.Booster.add_valid: Failed to add validation data; "
......@@ -158,7 +156,6 @@ Booster <- R6::R6Class(
)
}
# Check if names are character
if (!is.character(name)) {
stop("lgb.Booster.add_valid: Can only use characters as data name")
}
......@@ -170,7 +167,6 @@ Booster <- R6::R6Class(
, data$.__enclos_env__$private$get_handle()
)
# Store private information
private$valid_sets <- c(private$valid_sets, data)
private$name_valid_sets <- c(private$name_valid_sets, name)
private$num_dataset <- private$num_dataset + 1L
......@@ -180,7 +176,6 @@ Booster <- R6::R6Class(
},
# Reset parameters of booster
reset_parameter = function(params, ...) {
if (methods::is(self$params, "list")) {
......@@ -210,27 +205,22 @@ Booster <- R6::R6Class(
}
}
# Check if training set is not null
if (!is.null(train_set)) {
# Check if training set is lgb.Dataset
if (!lgb.is.Dataset(train_set)) {
stop("lgb.Booster.update: Only can use lgb.Dataset as training data")
}
# Check if predictors are identical
if (!identical(train_set$predictor, private$init_predictor)) {
stop("lgb.Booster.update: Change train_set failed, you should use the same predictor for these data")
}
# Reset training data on booster
.Call(
LGBM_BoosterResetTrainingData_R
, private$handle
, train_set$.__enclos_env__$private$get_handle()
)
# Store private train set
private$train_set <- train_set
private$train_set_version <- train_set$.__enclos_env__$private$version
......@@ -249,7 +239,6 @@ Booster <- R6::R6Class(
} else {
# Check if objective is function
if (!is.function(fobj)) {
stop("lgb.Booster.update: fobj should be a function")
}
......@@ -289,7 +278,6 @@ Booster <- R6::R6Class(
# Return one iteration behind
rollback_one_iter = function() {
# Return one iteration behind
.Call(
LGBM_BoosterRollbackOneIter_R
, private$handle
......@@ -346,7 +334,6 @@ Booster <- R6::R6Class(
# Evaluate data on metrics
eval = function(data, name, feval = NULL) {
# Check if dataset is lgb.Dataset
if (!lgb.is.Dataset(data)) {
stop("lgb.Booster.eval: Can only use lgb.Dataset to eval")
}
......@@ -360,7 +347,6 @@ Booster <- R6::R6Class(
# Check for validation data
if (length(private$valid_sets) > 0L) {
# Loop through each validation set
for (i in seq_along(private$valid_sets)) {
# Check for identical validation data with training data
......@@ -406,15 +392,12 @@ Booster <- R6::R6Class(
# Evaluation validation data
eval_valid = function(feval = NULL) {
# Create ret list
ret <- list()
# Check if validation is empty
if (length(private$valid_sets) <= 0L) {
return(ret)
}
# Loop through each validation set
for (i in seq_along(private$valid_sets)) {
ret <- append(
x = ret
......@@ -429,12 +412,10 @@ Booster <- R6::R6Class(
# Save model
save_model = function(filename, num_iteration = NULL, feature_importance_type = 0L) {
# Check if number of iteration is non existent
if (is.null(num_iteration)) {
num_iteration <- self$best_iter
}
# Save booster model
.Call(
LGBM_BoosterSaveModel_R
, private$handle
......@@ -446,10 +427,8 @@ Booster <- R6::R6Class(
return(invisible(self))
},
# Save model to string
save_model_to_string = function(num_iteration = NULL, feature_importance_type = 0L) {
# Check if number of iteration is non existent
if (is.null(num_iteration)) {
num_iteration <- self$best_iter
}
......@@ -468,7 +447,6 @@ Booster <- R6::R6Class(
# Dump model in memory
dump_model = function(num_iteration = NULL, feature_importance_type = 0L) {
# Check if number of iteration is non existent
if (is.null(num_iteration)) {
num_iteration <- self$best_iter
}
......@@ -495,11 +473,10 @@ Booster <- R6::R6Class(
reshape = FALSE,
...) {
# Check if number of iteration is non existent
if (is.null(num_iteration)) {
num_iteration <- self$best_iter
}
# Check if start iteration is non existent
if (is.null(start_iteration)) {
start_iteration <- 0L
}
......@@ -565,7 +542,6 @@ Booster <- R6::R6Class(
# Store data name
data_name <- private$name_train_set
# Check for id bigger than 1
if (idx > 1L) {
data_name <- private$name_valid_sets[[idx - 1L]]
}
......@@ -609,14 +585,12 @@ Booster <- R6::R6Class(
# Get evaluation information
get_eval_info = function() {
# Check for evaluation names emptiness
if (is.null(private$eval_names)) {
eval_names <- .Call(
LGBM_BoosterGetEvalNames_R
, private$handle
)
# Check names' length
if (length(eval_names) > 0L) {
# Parse and store privately names
......@@ -635,7 +609,6 @@ Booster <- R6::R6Class(
},
# Perform inner evaluation
inner_eval = function(data_name, data_idx, feval = NULL) {
# Check for unknown dataset (over the maximum provided range)
......@@ -643,13 +616,10 @@ Booster <- R6::R6Class(
stop("data_idx should not be greater than num_dataset")
}
# Get evaluation information
private$get_eval_info()
# Prepare return
ret <- list()
# Check evaluation names existence
if (length(private$eval_names) > 0L) {
# Create evaluation values
......@@ -661,7 +631,6 @@ Booster <- R6::R6Class(
, tmp_vals
)
# Loop through all evaluation names
for (i in seq_along(private$eval_names)) {
# Store evaluation and append to return
......@@ -684,7 +653,6 @@ Booster <- R6::R6Class(
stop("lgb.Booster.eval: feval should be a function")
}
# Prepare data
data <- private$train_set
# Check if data to assess is existing differently
......@@ -695,7 +663,6 @@ Booster <- R6::R6Class(
# Perform function evaluation
res <- feval(private$inner_predict(data_idx), data)
# Check for name correctness
if (is.null(res$name) || is.null(res$value) || is.null(res$higher_better)) {
stop("lgb.Booster.eval: custom eval function should return a
list with attribute (name, value, higher_better)");
......@@ -780,7 +747,6 @@ predict.lgb.Booster <- function(object,
stop("predict.lgb.Booster: object should be an ", sQuote("lgb.Booster"))
}
# Return booster predictions
return(
object$predict(
data = data
......@@ -995,12 +961,10 @@ lgb.dump <- function(booster, num_iteration = NULL) {
#' @export
lgb.get.eval.result <- function(booster, data_name, eval_name, iters = NULL, is_err = FALSE) {
# Check if booster is booster
if (!lgb.is.Booster(x = booster)) {
stop("lgb.get.eval.result: Can only use ", sQuote("lgb.Booster"), " to get eval result")
}
# Check if data and evaluation name are characters or not
if (!is.character(data_name) || !is.character(eval_name)) {
stop("lgb.get.eval.result: data_name and eval_name should be characters")
}
......@@ -1039,7 +1003,6 @@ lgb.get.eval.result <- function(booster, data_name, eval_name, iters = NULL, is_
result <- booster$record_evals[[data_name]][[eval_name]][[.EVAL_ERR_KEY()]]
}
# Check if iteration is non existant
if (is.null(iters)) {
return(as.numeric(result))
}
......@@ -1049,6 +1012,5 @@ lgb.get.eval.result <- function(booster, data_name, eval_name, iters = NULL, is_
delta <- booster$record_evals$start_iter - 1.0
iters <- iters - delta
# Return requested result
return(as.numeric(result[iters]))
}
......@@ -969,7 +969,6 @@ slice.lgb.Dataset <- function(dataset, idxset, ...) {
stop("slice.lgb.Dataset: input dataset should be an lgb.Dataset object")
}
# Return sliced set
return(invisible(dataset$slice(idxset = idxset, ...)))
}
......@@ -1076,7 +1075,6 @@ setinfo.lgb.Dataset <- function(dataset, name, info, ...) {
stop("setinfo.lgb.Dataset: input dataset should be an lgb.Dataset object")
}
# Set information
return(invisible(dataset$setinfo(name = name, info = info)))
}
......@@ -1108,7 +1106,6 @@ lgb.Dataset.set.categorical <- function(dataset, categorical_feature) {
stop("lgb.Dataset.set.categorical: input dataset should be an lgb.Dataset object")
}
# Set categoricals
return(invisible(dataset$set_categorical_feature(categorical_feature = categorical_feature)))
}
......@@ -1135,12 +1132,10 @@ lgb.Dataset.set.categorical <- function(dataset, categorical_feature) {
#' @export
lgb.Dataset.set.reference <- function(dataset, reference) {
  # Fail fast on anything that is not an lgb.Dataset object
  if (!lgb.is.Dataset(x = dataset)) {
    stop("lgb.Dataset.set.reference: input dataset should be an lgb.Dataset object")
  }
  # Delegate to the Dataset's own R6 method; invisible() keeps the
  # returned object from printing when the call is not assigned
  return(invisible(dataset$set_reference(reference = reference)))
}
......@@ -1163,16 +1158,13 @@ lgb.Dataset.set.reference <- function(dataset, reference) {
#' @export
lgb.Dataset.save <- function(dataset, fname) {
  # Validate that the first argument really is an lgb.Dataset.
  # NOTE: the original messages said "lgb.Dataset.set:", a copy-paste
  # error that misreported which function failed; corrected to
  # "lgb.Dataset.save:" so users can locate the failing call.
  if (!lgb.is.Dataset(x = dataset)) {
    stop("lgb.Dataset.save: input dataset should be an lgb.Dataset object")
  }
  # The destination must be given as a character path
  if (!is.character(fname)) {
    stop("lgb.Dataset.save: fname should be a character or a file connection")
  }
  # Write the dataset to disk in LightGBM binary format;
  # invisible() suppresses printing of the returned object
  return(invisible(dataset$save_binary(fname = fname)))
}
......@@ -30,7 +30,6 @@ Predictor <- R6::R6Class(
private$params <- lgb.params2str(params = params)
handle <- NULL
# Check if handle is a character
if (is.character(modelfile)) {
# Create handle on it
......
......@@ -189,7 +189,6 @@ lgb.cv <- function(params = list()
, column_names = cnames
)
# Check for weights
if (!is.null(weight)) {
data$setinfo(name = "weight", info = weight)
}
......@@ -210,7 +209,6 @@ lgb.cv <- function(params = list()
data$set_categorical_feature(categorical_feature = categorical_feature)
}
# Check for folds
if (!is.null(folds)) {
# Check for list of folds or for single value
......@@ -218,12 +216,10 @@ lgb.cv <- function(params = list()
stop(sQuote("folds"), " must be a list with 2 or more elements that are vectors of indices for each CV-fold")
}
# Set number of folds
nfold <- length(folds)
} else {
# Check fold value
if (nfold <= 1L) {
stop(sQuote("nfold"), " must be > 1")
}
......@@ -583,15 +579,12 @@ lgb.stratified.folds <- function(y, k) {
lgb.merge.cv.result <- function(msg, showsd) {
# Get CV message length
if (length(msg) == 0L) {
stop("lgb.cv: size of cv result error")
}
# Get evaluation message length
eval_len <- length(msg[[1L]])
# Is evaluation message empty?
if (eval_len == 0L) {
stop("lgb.cv: should provide at least one metric for CV")
}
......@@ -606,7 +599,6 @@ lgb.merge.cv.result <- function(msg, showsd) {
# get structure (name, higher_better, data_name)
ret_eval <- msg[[1L]]
# Go through evaluation length items
for (j in seq_len(eval_len)) {
ret_eval[[j]]$value <- mean(eval_result[[j]])
}
......@@ -624,12 +616,10 @@ lgb.merge.cv.result <- function(msg, showsd) {
)
}
# Convert to list
ret_eval_err <- as.list(ret_eval_err)
}
# Return errors
return(
list(
eval_list = ret_eval
......
......@@ -78,7 +78,6 @@ lgb.interprete <- function(model,
}
)
# Sequence over idxset
for (i in seq_along(idxset)) {
tree_interpretation_dt_list[[i]] <- single.row.interprete(
tree_dt = tree_dt
......@@ -113,10 +112,8 @@ single.tree.interprete <- function(tree_dt,
# Get to root from leaf
leaf_to_root <- function(parent_id, current_value) {
# Store value
value_seq <<- c(current_value, value_seq)
# Check for null parent id
if (!is.na(parent_id)) {
# Not null means existing node
......@@ -151,7 +148,6 @@ multiple.tree.interprete <- function(tree_dt,
tree_index,
leaf_index) {
# Apply each trees
interp_dt <- data.table::rbindlist(
l = mapply(
FUN = single.tree.interprete
......
......@@ -50,10 +50,8 @@
#' @export
lgb.model.dt.tree <- function(model, num_iteration = NULL) {
# Dump json model first
json_model <- lgb.dump(booster = model, num_iteration = num_iteration)
# Parse json model second
parsed_json_model <- jsonlite::fromJSON(
txt = json_model
, simplifyVector = TRUE
......@@ -62,10 +60,10 @@ lgb.model.dt.tree <- function(model, num_iteration = NULL) {
, flatten = FALSE
)
# Parse tree model third
# Parse tree model
tree_list <- lapply(parsed_json_model$tree_info, single.tree.parse)
# Combine into single data.table fourth
# Combine into single data.table
tree_dt <- data.table::rbindlist(l = tree_list, use.names = TRUE)
# Substitute feature index with the actual feature name
......
......@@ -61,7 +61,6 @@ lgb.plot.interpretation <- function(tree_interpretation_dt,
left_margin = 10L,
cex = NULL) {
# Get number of columns
num_class <- ncol(tree_interpretation_dt) - 1L
# Refresh plot
......@@ -82,7 +81,6 @@ lgb.plot.interpretation <- function(tree_interpretation_dt,
)
)
# Check for number of classes
if (num_class == 1L) {
# Only one class, plot straight away
......
......@@ -188,7 +188,6 @@ lgb.train <- function(params = list(),
# Parse validation datasets
if (length(valids) > 0L) {
# Loop through all validation datasets using name
for (key in names(valids)) {
# Use names to get validation datasets
......
......@@ -21,7 +21,6 @@ lgb.is.null.handle <- function(x) {
lgb.params2str <- function(params) {
# Check for a list as input
if (!identical(class(params), "list")) {
stop("params must be a list")
}
......@@ -29,7 +28,6 @@ lgb.params2str <- function(params) {
# Split parameter names
names(params) <- gsub("\\.", "_", names(params))
# Setup temporary variable
ret <- list()
# Perform key value join
......@@ -54,7 +52,6 @@ lgb.params2str <- function(params) {
}
# Check ret length
if (length(ret) == 0L) {
return("")
}
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment