context("Test models with custom objective")

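# Load the small agaricus (mushroom) example data that ships with lightgbm.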
data(agaricus.train, package = "lightgbm")
data(agaricus.test, package = "lightgbm")
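
# Wrap the matrices in lgb.Dataset objects; 'watchlist' names the validation
# sets that are evaluated during training.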
dtrain <- lgb.Dataset(agaricus.train$data, label = agaricus.train$label)
dtest <- lgb.Dataset(agaricus.test$data, label = agaricus.test$label)
watchlist <- list(eval = dtest, train = dtrain)

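# Custom objective: binary logistic loss computed on the raw scores. With
# p = 1 / (1 + exp(-score)), the gradient of the loss is p - label and the
# hessian is p * (1 - p).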
logregobj <- function(preds, dtrain) {
  labels <- getinfo(dtrain, "label")
  preds <- 1.0 / (1.0 + exp(-preds))
  grad <- preds - labels
  hess <- preds * (1.0 - preds)
  return(list(grad = grad, hess = hess))
}

# A user-defined evaluation function returns a list of (name, value, higher_better).
# NOTE: when a custom objective is used, the predictions passed to evaluation
# functions are raw margins, not probabilities. This can make built-in evaluation
# metrics report wrong results, so you may need to write a custom evaluation
# function as well.
evalerror <- function(preds, dtrain) {
  labels <- getinfo(dtrain, "label")
  preds <- 1.0 / (1.0 + exp(-preds))
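  # Fraction of examples misclassified at a probability threshold of 0.5.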
  err <- as.numeric(sum(labels != (preds > 0.5))) / length(labels)
  return(list(
    name = "error"
    , value = err
    , higher_better = FALSE
  ))
}

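# Training parameters: the custom objective is passed as an R function.
# The built-in "auc" metric is rank-based, so it is unaffected by the
# predictions being raw scores rather than probabilities.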
param <- list(
  num_leaves = 8L
  , learning_rate = 1.0
  , objective = logregobj
  , metric = "auc"
)
num_round <- 10L

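# Train with the custom objective and custom evaluation function, then check
# that evaluation results were recorded.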
test_that("custom objective works", {
  bst <- lgb.train(param, dtrain, num_round, watchlist, eval = evalerror)
  expect_false(is.null(bst$record_evals))
})