% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lgb.train.R
\name{lgb.train}
\alias{lgb.train}
\title{Main training logic for LightGBM}
\usage{
lgb.train(params = list(), data, nrounds = 10, valids = list(),
  obj = NULL, eval = NULL, verbose = 1, record = TRUE, eval_freq = 1L,
  init_model = NULL, colnames = NULL, categorical_feature = NULL,
  early_stopping_rounds = NULL, callbacks = list(), reset_data = FALSE,
  ...)
}
\arguments{
\item{params}{List of parameters}

\item{data}{a \code{lgb.Dataset} object, used for training}

\item{nrounds}{number of training rounds}

\item{valids}{a list of \code{lgb.Dataset} objects, used for validation}

\item{obj}{objective function, can be a character string or a custom objective function. Examples include
\code{regression}, \code{regression_l1}, \code{huber},
\code{binary}, \code{lambdarank}, \code{multiclass}
(see the sketch of a custom objective in the Details section below)}

\item{eval}{evaluation function, can be (a list of) character strings or a custom eval function (see the sketch in the Details section below)}

\item{verbose}{verbosity for output; if <= 0, printing of evaluation during training is also disabled}

\item{record}{Boolean; if TRUE, iteration messages will be recorded to \code{booster$record_evals}}

\item{eval_freq}{evaluation output frequency; only has an effect when verbose > 0}

\item{init_model}{path to a model file or an \code{lgb.Booster} object; training will continue from this model (see the example below)}

\item{colnames}{feature names; if not NULL, these overwrite the names in the dataset}

\item{categorical_feature}{list of str or int.
Type int represents column index,
type str represents feature name}

\item{early_stopping_rounds}{int
Activates early stopping.
Requires at least one validation dataset and one metric.
If there is more than one, all of them will be checked except the training data.
Returns the model trained for (best_iter + early_stopping_rounds) rounds.
If early stopping occurs, the model will have a 'best_iter' field (see the example below)}

\item{callbacks}{List of callback functions that are applied at each iteration.}

\item{reset_data}{Boolean, setting it to TRUE (not the default value) will transform the booster model into a predictor model, freeing up memory and the original datasets}

\item{...}{other parameters, see Parameters.rst for more information. A few key parameters:
\itemize{
    \item{boosting}{Boosting type. \code{"gbdt"} or \code{"dart"}}
    \item{num_leaves}{Number of leaves in one tree. Defaults to 127.}
    \item{max_depth}{Limit the max depth of the tree model. This is used to deal with
                     overfitting when #data is small. Trees still grow leaf-wise.}
    \item{num_threads}{Number of threads for LightGBM. For the best speed, set this to
                       the number of physical CPU cores, not the number of threads (most
                       CPUs use hyper-threading to generate 2 threads per CPU core).}
}}
}
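\details{
Both \code{obj} and \code{eval} accept custom R functions with the signature
\code{function(preds, dtrain)}. A custom objective must return a list with the
gradient and hessian of the loss; a custom metric must return its name, its value,
and whether a higher value is better. The sketches below are illustrative only,
assuming the label can be retrieved with \code{getinfo()} and that \code{preds}
are raw scores; adapt them to your data.

\preformatted{
# sketch of a custom objective: binary logistic loss
logregobj <- function(preds, dtrain) {
  labels <- getinfo(dtrain, "label")
  preds <- 1 / (1 + exp(-preds))
  grad <- preds - labels
  hess <- preds * (1 - preds)
  list(grad = grad, hess = hess)
}

# sketch of a custom metric: classification error at a 0.5 threshold
evalerror <- function(preds, dtrain) {
  labels <- getinfo(dtrain, "label")
  err <- mean((preds > 0.5) != labels)
  list(name = "error", value = err, higher_better = FALSE)
}
}
}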
\value{
a trained booster model \code{lgb.Booster}.
}
\description{
Logic to train with LightGBM
}
\examples{
library(lightgbm)
data(agaricus.train, package = "lightgbm")
train <- agaricus.train
dtrain <- lgb.Dataset(train$data, label = train$label)
data(agaricus.test, package = "lightgbm")
test <- agaricus.test
dtest <- lgb.Dataset.create.valid(dtrain, test$data, label = test$label)
params <- list(objective = "regression", metric = "l2")
valids <- list(test = dtest)
model <- lgb.train(params,
                   dtrain,
                   nrounds = 100,
                   valids = valids,
                   min_data = 1,
                   learning_rate = 1,
                   early_stopping_rounds = 10)
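
# if early stopping occurred, the booster records the best iteration
# in its 'best_iter' field (see early_stopping_rounds above)
print(model$best_iter)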

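# training can be resumed from a saved model file or an existing booster via
# init_model; a brief sketch, continuing from the booster fitted above
model2 <- lgb.train(params,
                    dtrain,
                    nrounds = 10,
                    valids = valids,
                    init_model = model,
                    min_data = 1,
                    learning_rate = 1)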
}