context("feature penalties")

# Shared fixtures: the bundled agaricus (mushroom) binary-classification
# dataset that ships with the lightgbm package.
data(agaricus.train, package = "lightgbm")
data(agaricus.test, package = "lightgbm")

train <- agaricus.train
test <- agaricus.test
test_that("Feature penalties work properly", {
  # Fit a series of models with varying penalty on most important variable
  var_name <- "odor=none"
  var_index <- which(train$data@Dimnames[[2L]] == var_name)

  # One model per penalty value, from no penalty (1.0) down to a full
  # ban (0.0) on the chosen feature; all other features keep penalty 1.0.
  bst <- lapply(seq(1.0, 0.0, by = -0.1), function(x) {
    feature_penalties <- rep(1.0, ncol(train$data))
    feature_penalties[var_index] <- x
    lightgbm(
      data = train$data
      , label = train$label
      , num_leaves = 5L
      , learning_rate = 0.05
      , nrounds = 5L
      , objective = "binary"
      , feature_penalty = paste0(feature_penalties, collapse = ",")
      , metric = "binary_error"
      , verbose = -1L
      , save_name = tempfile(fileext = ".model")
    )
  })

  # Importance of the penalized feature in each fitted model
  var_gain <- lapply(bst, function(x) lgb.importance(x)[Feature == var_name, Gain])
  var_cover <- lapply(bst, function(x) lgb.importance(x)[Feature == var_name, Cover])
  var_freq <- lapply(bst, function(x) lgb.importance(x)[Feature == var_name, Frequency])

  # Ensure that feature gain, cover, and frequency decreases with stronger penalties
  expect_true(all(diff(unlist(var_gain)) <= 0.0))
  expect_true(all(diff(unlist(var_cover)) <= 0.0))
  expect_true(all(diff(unlist(var_freq)) <= 0.0))

  # At least one strict decrease: the penalty must actually change the model,
  # not just leave importances flat across all penalty levels.
  expect_lt(min(diff(unlist(var_gain))), 0.0)
  expect_lt(min(diff(unlist(var_cover))), 0.0)
  expect_lt(min(diff(unlist(var_freq))), 0.0)

  # Ensure that feature is not used when feature_penalty = 0
  expect_length(var_gain[[length(var_gain)]], 0L)
})
context("parameter aliases")

test_that(".PARAMETER_ALIASES() returns a named list of character vectors, where names are unique", {
  param_aliases <- .PARAMETER_ALIASES()
  expect_identical(class(param_aliases), "list")
  # LightGBM exposes well over 100 parameters; a much smaller list would
  # indicate the alias table failed to load completely.
  expect_true(length(param_aliases) > 100L)

  # Spot-check a few well-known parameters: names and alias vectors are character
  expect_true(is.character(names(param_aliases)))
  expect_true(is.character(param_aliases[["boosting"]]))
  expect_true(is.character(param_aliases[["early_stopping_round"]]))
  expect_true(is.character(param_aliases[["num_iterations"]]))
  expect_true(is.character(param_aliases[["pre_partition"]]))

  # Every element is named, every element is a character vector,
  # and no parameter name appears twice
  expect_true(length(names(param_aliases)) == length(param_aliases))
  expect_true(all(sapply(param_aliases, is.character)))
  expect_true(length(unique(names(param_aliases))) == length(param_aliases))

  # A parameter's alias vector includes the parameter's own name
  expect_equal(sort(param_aliases[["task"]]), c("task", "task_type"))
})
test_that("training should warn if you use 'dart' boosting, specified with 'boosting' or aliases", {
  # The warning must fire no matter which alias is used to select 'dart'
  for (boosting_param in .PARAMETER_ALIASES()[["boosting"]]) {
    expect_warning({
      result <- lightgbm(
        data = train$data
        , label = train$label
        , num_leaves = 5L
        , learning_rate = 0.05
        , nrounds = 5L
        , objective = "binary"
        , metric = "binary_error"
        , verbose = -1L
        # Build a one-element params list like list(<alias> = "dart")
        # dynamically from the current alias name
        , params = stats::setNames(
          object = "dart"
          , nm = boosting_param
        )
        , save_name = tempfile(fileext = ".model")
      )
    }, regexp = "Early stopping is not available in 'dart' mode")
  }
})