/*!
 * Copyright (c) 2016 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for license information.
 *
 * \note
 * - desc and descl2 fields must be written in reStructuredText format;
 * - nested sections can be placed only at the bottom of parent's section;
 * - [doc-only] tag indicates that only documentation for this param should be generated and all other actions are performed manually;
 * - [no-save] tag indicates that this param should not be saved into a model text representation.
 */
#ifndef LIGHTGBM_CONFIG_H_
#define LIGHTGBM_CONFIG_H_

#include <LightGBM/export.h>
#include <LightGBM/meta.h>
#include <LightGBM/utils/common.h>
#include <LightGBM/utils/log.h>

#include <string>
#include <algorithm>
#include <memory>
#include <unordered_map>
#include <unordered_set>
#include <vector>

namespace LightGBM {

/*! \brief Types of tasks */
enum TaskType {
  kTrain, kPredict, kConvertModel, KRefitTree
};
const int kDefaultNumLeaves = 31;

struct Config {
 public:
  std::string ToString() const;
  /*!
  * \brief Get string value by specific name of key
  * \param params Store the key and value for params
  * \param name Name of key
  * \param out Value will be assigned to out if key exists
  * \return True if key exists
  */
  inline static bool GetString(
    const std::unordered_map<std::string, std::string>& params,
    const std::string& name, std::string* out);

  /*!
  * \brief Get int value by specific name of key
  * \param params Store the key and value for params
  * \param name Name of key
  * \param out Value will be assigned to out if key exists
  * \return True if key exists
  */
  inline static bool GetInt(
    const std::unordered_map<std::string, std::string>& params,
    const std::string& name, int* out);

  /*!
  * \brief Get double value by specific name of key
  * \param params Store the key and value for params
  * \param name Name of key
  * \param out Value will be assigned to out if key exists
  * \return True if key exists
  */
  inline static bool GetDouble(
    const std::unordered_map<std::string, std::string>& params,
    const std::string& name, double* out);

  /*!
  * \brief Get bool value by specific name of key
  * \param params Store the key and value for params
  * \param name Name of key
  * \param out Value will be assigned to out if key exists
  * \return True if key exists
  */
  inline static bool GetBool(
    const std::unordered_map<std::string, std::string>& params,
    const std::string& name, bool* out);

  static void KV2Map(std::unordered_map<std::string, std::string>* params, const char* kv);
  static std::unordered_map<std::string, std::string> Str2Map(const char* parameters);
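  // Illustrative usage sketch (not itself part of this header): these helpers are
  // typically combined to parse a raw parameter string and read typed values back, e.g.
  //   auto params = Config::Str2Map("num_leaves=63 learning_rate=0.05");
  //   int num_leaves = kDefaultNumLeaves;
  //   Config::GetInt(params, "num_leaves", &num_leaves);      // returns true if the key exists
  //   double learning_rate = 0.1;
  //   Config::GetDouble(params, "learning_rate", &learning_rate);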

  #pragma region Parameters

  #pragma region Core Parameters

  // [no-save]
  // [doc-only]
  // alias = config_file
  // desc = path of config file
  // desc = **Note**: can be used only in CLI version
  std::string config = "";
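  // Illustrative sketch of a config file passed through this parameter (CLI only):
  // a plain-text list of ``key = value`` lines, with ``#`` starting a comment, e.g.
  //   task = train
  //   objective = binary
  //   data = train.txt
  //   num_leaves = 31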

  // [no-save]
  // [doc-only]
  // type = enum
  // default = train
  // options = train, predict, convert_model, refit
  // alias = task_type
  // desc = ``train``, for training, aliases: ``training``
  // desc = ``predict``, for prediction, aliases: ``prediction``, ``test``
  // desc = ``convert_model``, for converting model file into if-else format, see more information in `Convert Parameters <#convert-parameters>`__
  // desc = ``refit``, for refitting existing models with new data, aliases: ``refit_tree``
  // desc = **Note**: can be used only in CLI version; for language-specific packages you can use the correspondent functions
  TaskType task = TaskType::kTrain;

  // [doc-only]
  // type = enum
  // options = regression, regression_l1, huber, fair, poisson, quantile, mape, gamma, tweedie, binary, multiclass, multiclassova, cross_entropy, cross_entropy_lambda, lambdarank, rank_xendcg
  // alias = objective_type, app, application
  // desc = regression application
  // descl2 = ``regression``, L2 loss, aliases: ``regression_l2``, ``l2``, ``mean_squared_error``, ``mse``, ``l2_root``, ``root_mean_squared_error``, ``rmse``
  // descl2 = ``regression_l1``, L1 loss, aliases: ``l1``, ``mean_absolute_error``, ``mae``
  // descl2 = ``huber``, `Huber loss <https://en.wikipedia.org/wiki/Huber_loss>`__
  // descl2 = ``fair``, `Fair loss <https://www.kaggle.com/c/allstate-claims-severity/discussion/24520>`__
  // descl2 = ``poisson``, `Poisson regression <https://en.wikipedia.org/wiki/Poisson_regression>`__
  // descl2 = ``quantile``, `Quantile regression <https://en.wikipedia.org/wiki/Quantile_regression>`__
  // descl2 = ``mape``, `MAPE loss <https://en.wikipedia.org/wiki/Mean_absolute_percentage_error>`__, aliases: ``mean_absolute_percentage_error``
  // descl2 = ``gamma``, Gamma regression with log-link. It might be useful, e.g., for modeling insurance claims severity, or for any target that might be `gamma-distributed <https://en.wikipedia.org/wiki/Gamma_distribution#Occurrence_and_applications>`__
  // descl2 = ``tweedie``, Tweedie regression with log-link. It might be useful, e.g., for modeling total loss in insurance, or for any target that might be `tweedie-distributed <https://en.wikipedia.org/wiki/Tweedie_distribution#Occurrence_and_applications>`__
  // desc = binary classification application
  // descl2 = ``binary``, binary `log loss <https://en.wikipedia.org/wiki/Cross_entropy>`__ classification (or logistic regression)
  // descl2 = requires labels in {0, 1}; see ``cross-entropy`` application for general probability labels in [0, 1]
  // desc = multi-class classification application
  // descl2 = ``multiclass``, `softmax <https://en.wikipedia.org/wiki/Softmax_function>`__ objective function, aliases: ``softmax``
  // descl2 = ``multiclassova``, `One-vs-All <https://en.wikipedia.org/wiki/Multiclass_classification#One-vs.-rest>`__ binary objective function, aliases: ``multiclass_ova``, ``ova``, ``ovr``
  // descl2 = ``num_class`` should be set as well
  // desc = cross-entropy application
  // descl2 = ``cross_entropy``, objective function for cross-entropy (with optional linear weights), aliases: ``xentropy``
  // descl2 = ``cross_entropy_lambda``, alternative parameterization of cross-entropy, aliases: ``xentlambda``
  // descl2 = label is anything in interval [0, 1]
  // desc = ranking application
  // descl2 = ``lambdarank``, `lambdarank <https://papers.nips.cc/paper/2971-learning-to-rank-with-nonsmooth-cost-functions.pdf>`__ objective. `label_gain <#label_gain>`__ can be used to set the gain (weight) of ``int`` label and all values in ``label`` must be smaller than number of elements in ``label_gain``
  // descl2 = ``rank_xendcg``, `XE_NDCG_MART <https://arxiv.org/abs/1911.09798>`__ ranking objective function, aliases: ``xendcg``, ``xe_ndcg``, ``xe_ndcg_mart``, ``xendcg_mart``
  // descl2 = ``rank_xendcg`` is faster than and achieves similar performance to ``lambdarank``
  // descl2 = label should be ``int`` type, and larger number represents the higher relevance (e.g. 0:bad, 1:fair, 2:good, 3:perfect)
  std::string objective = "regression";

  // [doc-only]
  // type = enum
  // alias = boosting_type, boost
  // options = gbdt, rf, dart, goss
  // desc = ``gbdt``, traditional Gradient Boosting Decision Tree, aliases: ``gbrt``
  // desc = ``rf``, Random Forest, aliases: ``random_forest``
  // desc = ``dart``, `Dropouts meet Multiple Additive Regression Trees <https://arxiv.org/abs/1505.01866>`__
  // desc = ``goss``, Gradient-based One-Side Sampling
  // descl2 = **Note**: internally, LightGBM uses ``gbdt`` mode for the first ``1 / learning_rate`` iterations
  std::string boosting = "gbdt";

  // alias = train, train_data, train_data_file, data_filename
  // desc = path of training data, LightGBM will train from this data
  // desc = **Note**: can be used only in CLI version
  std::string data = "";

  // alias = test, valid_data, valid_data_file, test_data, test_data_file, valid_filenames
  // default = ""
  // desc = path(s) of validation/test data, LightGBM will output metrics for these data
  // desc = support multiple validation data, separated by ``,``
  // desc = **Note**: can be used only in CLI version
  std::vector<std::string> valid;

  // alias = num_iteration, n_iter, num_tree, num_trees, num_round, num_rounds, num_boost_round, n_estimators
  // check = >=0
  // desc = number of boosting iterations
  // desc = **Note**: internally, LightGBM constructs ``num_class * num_iterations`` trees for multi-class classification problems
  int num_iterations = 100;

  // alias = shrinkage_rate, eta
  // check = >0.0
  // desc = shrinkage rate
  // desc = in ``dart``, it also affects the normalization weights of dropped trees
  double learning_rate = 0.1;

  // default = 31
  // alias = num_leaf, max_leaves, max_leaf
  // check = >1
  // check = <=131072
  // desc = max number of leaves in one tree
  int num_leaves = kDefaultNumLeaves;

  // [doc-only]
  // type = enum
  // options = serial, feature, data, voting
  // alias = tree, tree_type, tree_learner_type
  // desc = ``serial``, single machine tree learner
  // desc = ``feature``, feature parallel tree learner, aliases: ``feature_parallel``
  // desc = ``data``, data parallel tree learner, aliases: ``data_parallel``
  // desc = ``voting``, voting parallel tree learner, aliases: ``voting_parallel``
  // desc = refer to `Parallel Learning Guide <./Parallel-Learning-Guide.rst>`__ to get more details
  std::string tree_learner = "serial";

  // alias = num_thread, nthread, nthreads, n_jobs
  // desc = number of threads for LightGBM
  // desc = ``0`` means default number of threads in OpenMP
  // desc = for the best speed, set this to the number of **real CPU cores**, not the number of threads (most CPUs use `hyper-threading <https://en.wikipedia.org/wiki/Hyper-threading>`__ to generate 2 threads per CPU core)
  // desc = do not set it too large if your dataset is small (for instance, do not use 64 threads for a dataset with 10,000 rows)
  // desc = be aware a task manager or any similar CPU monitoring tool might report that cores are not being fully utilized. **This is normal**
  // desc = for parallel learning, do not use all CPU cores because this will cause poor performance for the network communication
  // desc = **Note**: please **don't** change this during training, especially when running multiple jobs simultaneously by external packages, otherwise it may cause undesirable errors
  int num_threads = 0;

  // [doc-only]
  // type = enum
  // options = cpu, gpu
  // alias = device
  // desc = device for the tree learning; you can use GPU to achieve faster learning
  // desc = **Note**: it is recommended to use a smaller ``max_bin`` (e.g. 63) to get a better speedup
  // desc = **Note**: for faster speed, GPU uses 32-bit floating point to sum up by default, so this may affect the accuracy for some tasks. You can set ``gpu_use_dp=true`` to enable 64-bit floating point, but it will slow down the training
  // desc = **Note**: refer to `Installation Guide <./Installation-Guide.rst#build-gpu-version>`__ to build LightGBM with GPU support
  std::string device_type = "cpu";

  // [doc-only]
  // alias = random_seed, random_state
  // default = None
  // desc = this seed is used to generate other seeds, e.g. ``data_random_seed``, ``feature_fraction_seed``, etc.
  // desc = by default, this seed is unused in favor of default values of other seeds
  // desc = this seed has lower priority in comparison with other seeds, which means that it will be overridden, if you set other seeds explicitly
  int seed = 0;

  // desc = used only with ``cpu`` device type
  // desc = setting this to ``true`` should ensure stable results when using the same data and the same parameters (and different ``num_threads``)
  // desc = when you use different seeds, different LightGBM versions, the binaries compiled by different compilers, or in different systems, the results are expected to be different
  // desc = you can `raise issues <https://github.com/microsoft/LightGBM/issues>`__ in LightGBM GitHub repo when you meet the unstable results
  // desc = **Note**: setting this to ``true`` may slow down the training
  bool deterministic = false;

  #pragma endregion

  #pragma region Learning Control Parameters

  // desc = used only with ``cpu`` device type
  // desc = set this to ``true`` to force col-wise histogram building
  // desc = enabling this is recommended when:
  // descl2 = the number of columns is large, or the total number of bins is large
  // descl2 = ``num_threads`` is large, e.g. ``> 20``
  // descl2 = you want to reduce memory cost
  // desc = **Note**: when both ``force_col_wise`` and ``force_row_wise`` are ``false``, LightGBM will firstly try them both, and then use the faster one. To remove the overhead of testing, set the faster one to ``true`` manually
  // desc = **Note**: this parameter cannot be used at the same time with ``force_row_wise``, choose only one of them
  bool force_col_wise = false;

  // desc = used only with ``cpu`` device type
  // desc = set this to ``true`` to force row-wise histogram building
  // desc = enabling this is recommended when:
  // descl2 = the number of data points is large, and the total number of bins is relatively small
  // descl2 = ``num_threads`` is relatively small, e.g. ``<= 16``
  // descl2 = you want to use small ``bagging_fraction`` or ``goss`` boosting to speed up
  // desc = **Note**: setting this to ``true`` will double the memory cost for Dataset object. If you do not have enough memory, you can try setting ``force_col_wise=true``
  // desc = **Note**: when both ``force_col_wise`` and ``force_row_wise`` are ``false``, LightGBM will firstly try them both, and then use the faster one. To remove the overhead of testing, set the faster one to ``true`` manually
  // desc = **Note**: this parameter cannot be used at the same time with ``force_col_wise``, choose only one of them
  bool force_row_wise = false;

  // alias = hist_pool_size
  // desc = max cache size in MB for historical histogram
  // desc = ``< 0`` means no limit
  double histogram_pool_size = -1.0;

  // desc = limit the max depth for tree model. This is used to deal with over-fitting when ``#data`` is small. Tree still grows leaf-wise
  // desc = ``<= 0`` means no limit
  int max_depth = -1;

  // alias = min_data_per_leaf, min_data, min_child_samples
  // check = >=0
  // desc = minimal number of data in one leaf. Can be used to deal with over-fitting
  int min_data_in_leaf = 20;

  // alias = min_sum_hessian_per_leaf, min_sum_hessian, min_hessian, min_child_weight
  // check = >=0.0
  // desc = minimal sum hessian in one leaf. Like ``min_data_in_leaf``, it can be used to deal with over-fitting
  double min_sum_hessian_in_leaf = 1e-3;

  // alias = sub_row, subsample, bagging
  // check = >0.0
  // check = <=1.0
  // desc = like ``feature_fraction``, but this will randomly select part of data without resampling
  // desc = can be used to speed up training
  // desc = can be used to deal with over-fitting
  // desc = **Note**: to enable bagging, ``bagging_freq`` should be set to a non zero value as well
  double bagging_fraction = 1.0;

  // alias = pos_sub_row, pos_subsample, pos_bagging
  // check = >0.0
  // check = <=1.0
  // desc = used only in ``binary`` application
  // desc = used for imbalanced binary classification problem, will randomly sample ``#pos_samples * pos_bagging_fraction`` positive samples in bagging
  // desc = should be used together with ``neg_bagging_fraction``
  // desc = set this to ``1.0`` to disable
  // desc = **Note**: to enable this, you need to set ``bagging_freq`` and ``neg_bagging_fraction`` as well
  // desc = **Note**: if both ``pos_bagging_fraction`` and ``neg_bagging_fraction`` are set to ``1.0``,  balanced bagging is disabled
  // desc = **Note**: if balanced bagging is enabled, ``bagging_fraction`` will be ignored
  double pos_bagging_fraction = 1.0;

  // alias = neg_sub_row, neg_subsample, neg_bagging
  // check = >0.0
  // check = <=1.0
  // desc = used only in ``binary`` application
  // desc = used for imbalanced binary classification problem, will randomly sample ``#neg_samples * neg_bagging_fraction`` negative samples in bagging
  // desc = should be used together with ``pos_bagging_fraction``
  // desc = set this to ``1.0`` to disable
  // desc = **Note**: to enable this, you need to set ``bagging_freq`` and ``pos_bagging_fraction`` as well
  // desc = **Note**: if both ``pos_bagging_fraction`` and ``neg_bagging_fraction`` are set to ``1.0``,  balanced bagging is disabled
  // desc = **Note**: if balanced bagging is enabled, ``bagging_fraction`` will be ignored
  double neg_bagging_fraction = 1.0;

  // alias = subsample_freq
  // desc = frequency for bagging
  // desc = ``0`` means disable bagging; ``k`` means perform bagging at every ``k`` iteration. Every ``k``-th iteration, LightGBM will randomly select ``bagging_fraction * 100 %`` of the data to use for the next ``k`` iterations
  // desc = **Note**: to enable bagging, ``bagging_fraction`` should be set to value smaller than ``1.0`` as well
  int bagging_freq = 0;
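  // Illustrative example, combining the two notes above: bagging takes effect only
  // when both parameters are set, e.g. ``bagging_fraction=0.8`` together with
  // ``bagging_freq=5`` re-samples 80% of the data once every 5 iterations.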

  // alias = bagging_fraction_seed
  // desc = random seed for bagging
  int bagging_seed = 3;

  // alias = sub_feature, colsample_bytree
  // check = >0.0
  // check = <=1.0
  // desc = LightGBM will randomly select a subset of features on each iteration (tree) if ``feature_fraction`` is smaller than ``1.0``. For example, if you set it to ``0.8``, LightGBM will select 80% of features before training each tree
  // desc = can be used to speed up training
  // desc = can be used to deal with over-fitting
  double feature_fraction = 1.0;

  // alias = sub_feature_bynode, colsample_bynode
  // check = >0.0
  // check = <=1.0
  // desc = LightGBM will randomly select a subset of features on each tree node if ``feature_fraction_bynode`` is smaller than ``1.0``. For example, if you set it to ``0.8``, LightGBM will select 80% of features at each tree node
  // desc = can be used to deal with over-fitting
  // desc = **Note**: unlike ``feature_fraction``, this cannot speed up training
  // desc = **Note**: if both ``feature_fraction`` and ``feature_fraction_bynode`` are smaller than ``1.0``, the final fraction of each node is ``feature_fraction * feature_fraction_bynode``
  double feature_fraction_bynode = 1.0;
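  // Worked example of the note above: with ``feature_fraction=0.8`` and
  // ``feature_fraction_bynode=0.5``, about 0.8 * 0.5 = 40% of the features are
  // considered at each node split.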

  // desc = random seed for ``feature_fraction``
  int feature_fraction_seed = 2;

  // desc = use extremely randomized trees
  // desc = if set to ``true``, when evaluating node splits LightGBM will check only one randomly-chosen threshold for each feature
  // desc = can be used to speed up training
  // desc = can be used to deal with over-fitting
  bool extra_trees = false;

  // desc = random seed for selecting thresholds when ``extra_trees`` is true
  int extra_seed = 6;

  // alias = early_stopping_rounds, early_stopping, n_iter_no_change
  // desc = will stop training if one metric of one validation data doesn't improve in last ``early_stopping_round`` rounds
  // desc = ``<= 0`` means disable
  // desc = can be used to speed up training
  int early_stopping_round = 0;

  // desc = LightGBM allows you to provide multiple evaluation metrics. Set this to ``true``, if you want to use only the first metric for early stopping
  bool first_metric_only = false;

  // alias = max_tree_output, max_leaf_output
  // desc = used to limit the max output of tree leaves
  // desc = ``<= 0`` means no constraint
  // desc = the final max output of leaves is ``learning_rate * max_delta_step``
  double max_delta_step = 0.0;

  // alias = reg_alpha
  // check = >=0.0
  // desc = L1 regularization
  double lambda_l1 = 0.0;

  // alias = reg_lambda, lambda
  // check = >=0.0
  // desc = L2 regularization
  double lambda_l2 = 0.0;

  // alias = min_split_gain
  // check = >=0.0
  // desc = the minimal gain to perform split
  // desc = can be used to speed up training
  double min_gain_to_split = 0.0;

  // alias = rate_drop
  // check = >=0.0
  // check = <=1.0
  // desc = used only in ``dart``
  // desc = dropout rate: a fraction of previous trees to drop during the dropout
  double drop_rate = 0.1;

  // desc = used only in ``dart``
  // desc = max number of dropped trees during one boosting iteration
  // desc = ``<=0`` means no limit
  int max_drop = 50;

  // check = >=0.0
  // check = <=1.0
  // desc = used only in ``dart``
  // desc = probability of skipping the dropout procedure during a boosting iteration
  double skip_drop = 0.5;

  // desc = used only in ``dart``
  // desc = set this to ``true``, if you want to use xgboost dart mode
  bool xgboost_dart_mode = false;

  // desc = used only in ``dart``
  // desc = set this to ``true``, if you want to use uniform drop
  bool uniform_drop = false;

  // desc = used only in ``dart``
  // desc = random seed to choose dropping models
  int drop_seed = 4;

  // check = >=0.0
  // check = <=1.0
  // desc = used only in ``goss``
  // desc = the retain ratio of large gradient data
  double top_rate = 0.2;

  // check = >=0.0
  // check = <=1.0
  // desc = used only in ``goss``
  // desc = the retain ratio of small gradient data
  double other_rate = 0.1;

  // check = >0
  // desc = minimal number of data per categorical group
  int min_data_per_group = 100;

  // check = >0
  // desc = used for the categorical features
  // desc = limit number of split points considered for categorical features. See `the documentation on how LightGBM finds optimal splits for categorical features <./Features.rst#optimal-split-for-categorical-features>`_ for more details
  // desc = can be used to speed up training
  int max_cat_threshold = 32;

  // check = >=0.0
  // desc = used for the categorical features
  // desc = L2 regularization in categorical split
  double cat_l2 = 10.0;

  // check = >=0.0
  // desc = used for the categorical features
  // desc = this can reduce the effect of noises in categorical features, especially for categories with few data
  double cat_smooth = 10.0;

  // check = >0
  // desc = when number of categories of one feature smaller than or equal to ``max_cat_to_onehot``, one-vs-other split algorithm will be used
  int max_cat_to_onehot = 4;

  // alias = topk
  // check = >0
  // desc = used only in ``voting`` tree learner, refer to `Voting parallel <./Parallel-Learning-Guide.rst#choose-appropriate-parallel-algorithm>`__
  // desc = set this to larger value for more accurate result, but it will slow down the training speed
  int top_k = 20;

  // type = multi-int
  // alias = mc, monotone_constraint
  // default = None
  // desc = used for constraints of monotonic features
  // desc = ``1`` means increasing, ``-1`` means decreasing, ``0`` means non-constraint
  // desc = you need to specify all features in order. For example, ``mc=-1,0,1`` means decreasing for 1st feature, non-constraint for 2nd feature and increasing for the 3rd feature
  std::vector<int8_t> monotone_constraints;

  // type = enum
  // alias = monotone_constraining_method, mc_method
  // options = basic, intermediate, advanced
  // desc = used only if ``monotone_constraints`` is set
  // desc = monotone constraints method
  // descl2 = ``basic``, the most basic monotone constraints method. It does not slow the library at all, but over-constrains the predictions
  // descl2 = ``intermediate``, a `more advanced method <https://hal.archives-ouvertes.fr/hal-02862802/document>`__, which may slow the library very slightly. However, this method is much less constraining than the basic method and should significantly improve the results
  // descl2 = ``advanced``, an `even more advanced method <https://hal.archives-ouvertes.fr/hal-02862802/document>`__, which may slow the library. However, this method is even less constraining than the intermediate method and should again significantly improve the results
  std::string monotone_constraints_method = "basic";

  // alias = monotone_splits_penalty, ms_penalty, mc_penalty
  // check = >=0.0
  // desc = used only if ``monotone_constraints`` is set
  // desc = `monotone penalty <https://hal.archives-ouvertes.fr/hal-02862802/document>`__: a penalization parameter X forbids any monotone splits on the first X (rounded down) level(s) of the tree. The penalty applied to monotone splits on a given depth is a continuous, increasing function of the penalization parameter
  // desc = if ``0.0`` (the default), no penalization is applied
  double monotone_penalty = 0.0;

  // type = multi-double
  // alias = feature_contrib, fc, fp, feature_penalty
  // default = None
  // desc = used to control feature's split gain, will use ``gain[i] = max(0, feature_contri[i]) * gain[i]`` to replace the split gain of i-th feature
  // desc = you need to specify all features in order
  std::vector<double> feature_contri;

  // alias = fs, forced_splits_filename, forced_splits_file, forced_splits
  // desc = path to a ``.json`` file that specifies splits to force at the top of every decision tree before best-first learning commences
  // desc = ``.json`` file can be arbitrarily nested, and each split contains ``feature``, ``threshold`` fields, as well as ``left`` and ``right`` fields representing subsplits
  // desc = categorical splits are forced in a one-hot fashion, with ``left`` representing the split containing the feature value and ``right`` representing other values
  // desc = **Note**: the forced split logic will be ignored, if the split makes gain worse
  // desc = see `this file <https://github.com/microsoft/LightGBM/tree/master/examples/binary_classification/forced_splits.json>`__ as an example
  std::string forcedsplits_filename = "";
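  // Hypothetical sketch of such a file (field names as described above; the linked
  // example file is the authoritative reference):
  //   {
  //     "feature": 2,
  //     "threshold": 10.5,
  //     "left": { "feature": 0, "threshold": 0.0 }
  //   }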

  // check = >=0.0
  // check = <=1.0
  // desc = decay rate of ``refit`` task, will use ``leaf_output = refit_decay_rate * old_leaf_output + (1.0 - refit_decay_rate) * new_leaf_output`` to refit trees
  // desc = used only in ``refit`` task in CLI version or as argument in ``refit`` function in language-specific package
  double refit_decay_rate = 0.9;
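  // Worked example of the formula above: with the default ``refit_decay_rate = 0.9``,
  // an old leaf output of 2.0 and a new leaf output of 1.0 give
  // 0.9 * 2.0 + 0.1 * 1.0 = 1.9 as the refitted leaf output.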

  // check = >=0.0
  // desc = cost-effective gradient boosting multiplier for all penalties
  double cegb_tradeoff = 1.0;

  // check = >=0.0
  // desc = cost-effective gradient-boosting penalty for splitting a node
  double cegb_penalty_split = 0.0;

  // type = multi-double
  // default = 0,0,...,0
  // desc = cost-effective gradient boosting penalty for using a feature
  // desc = applied per data point
  std::vector<double> cegb_penalty_feature_lazy;

  // type = multi-double
  // default = 0,0,...,0
  // desc = cost-effective gradient boosting penalty for using a feature
  // desc = applied once per forest
  std::vector<double> cegb_penalty_feature_coupled;

  // check = >= 0.0
  // desc = controls smoothing applied to tree nodes
  // desc = helps prevent overfitting on leaves with few samples
  // desc = if set to zero, no smoothing is applied
  // desc = if ``path_smooth > 0`` then ``min_data_in_leaf`` must be at least ``2``
  // desc = larger values give stronger regularisation
  // descl2 = the weight of each node is ``(n / path_smooth) * w + w_p / (n / path_smooth + 1)``, where ``n`` is the number of samples in the node, ``w`` is the optimal node weight to minimise the loss (approximately ``-sum_gradients / sum_hessians``), and ``w_p`` is the weight of the parent node
  // descl2 = note that the parent output ``w_p`` itself has smoothing applied, unless it is the root node, so that the smoothing effect accumulates with the tree depth
  double path_smooth = 0;
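  // Worked example of the weighting above, reading it as the weighted average
  // ``(n / path_smooth * w + w_p) / (n / path_smooth + 1)``: with ``path_smooth = 10``,
  // ``n = 20`` samples in the node, ``w = 1.0`` and parent output ``w_p = 0.5``,
  // ``n / path_smooth = 2``, so the smoothed node weight is (2 * 1.0 + 0.5) / 3 ~= 0.83.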

  // desc = controls which features can appear in the same branch
  // desc = by default interaction constraints are disabled, to enable them you can specify
  // descl2 = for CLI, lists separated by commas, e.g. ``[0,1,2],[2,3]``
  // descl2 = for Python-package, list of lists, e.g. ``[[0, 1, 2], [2, 3]]``
  // descl2 = for R-package, list of character or numeric vectors, e.g. ``list(c("var1", "var2", "var3"), c("var3", "var4"))`` or ``list(c(1L, 2L, 3L), c(3L, 4L))``. Numeric vectors should use 1-based indexing, where ``1L`` is the first feature, ``2L`` is the second feature, etc
  // desc = any two features can appear in the same branch only if there exists a constraint containing both features
  std::string interaction_constraints = "";

  // alias = verbose
  // desc = controls the level of LightGBM's verbosity
  // desc = ``< 0``: Fatal, ``= 0``: Error (Warning), ``= 1``: Info, ``> 1``: Debug
  int verbosity = 1;

  // [no-save]
  // alias = model_input, model_in
  // desc = filename of input model
  // desc = for ``prediction`` task, this model will be applied to prediction data
  // desc = for ``train`` task, training will be continued from this model
  // desc = **Note**: can be used only in CLI version
  std::string input_model = "";

  // [no-save]
  // alias = model_output, model_out
  // desc = filename of output model in training
  // desc = **Note**: can be used only in CLI version
  std::string output_model = "LightGBM_model.txt";

  // desc = the feature importance type in the saved model file
  // desc = ``0``: count-based feature importance (numbers of splits are counted); ``1``: gain-based feature importance (values of gain are counted)
  // desc = **Note**: can be used only in CLI version
  int saved_feature_importance_type = 0;

  // [no-save]
  // alias = save_period
  // desc = frequency of saving model file snapshot
  // desc = set this to positive value to enable this function. For example, the model file will be snapshotted at each iteration if ``snapshot_freq=1``
  // desc = **Note**: can be used only in CLI version
  int snapshot_freq = -1;

  #pragma endregion

  #pragma region IO Parameters

  #pragma region Dataset Parameters

  // check = >1
  // desc = max number of bins that feature values will be bucketed in
  // desc = small number of bins may reduce training accuracy but may increase general power (deal with over-fitting)
  // desc = LightGBM will auto compress memory according to ``max_bin``. For example, LightGBM will use ``uint8_t`` for feature value if ``max_bin=255``
  int max_bin = 255;

  // type = multi-int
  // default = None
  // desc = max number of bins for each feature
  // desc = if not specified, will use ``max_bin`` for all features
  std::vector<int32_t> max_bin_by_feature;

  // check = >0
  // desc = minimal number of data inside one bin
  // desc = use this to avoid one-data-one-bin (potential over-fitting)
  int min_data_in_bin = 3;

  // alias = subsample_for_bin
  // check = >0
  // desc = number of data points that are sampled to construct feature discrete bins
  // desc = setting this to larger value will give better training result, but may increase data loading time
  // desc = set this to larger value if data is very sparse
  // desc = **Note**: don't set this to small values, otherwise, you may encounter unexpected errors and poor accuracy
  int bin_construct_sample_cnt = 200000;

  // alias = data_seed
  // desc = random seed for sampling data to construct histogram bins
  int data_random_seed = 1;

  // alias = is_sparse, enable_sparse, sparse
  // desc = used to enable/disable sparse optimization
  bool is_enable_sparse = true;

  // alias = is_enable_bundle, bundle
  // desc = set this to ``false`` to disable Exclusive Feature Bundling (EFB), which is described in `LightGBM: A Highly Efficient Gradient Boosting Decision Tree <https://papers.nips.cc/paper/6907-lightgbm-a-highly-efficient-gradient-boosting-decision-tree>`__
  // desc = **Note**: disabling this may cause the slow training speed for sparse datasets
  bool enable_bundle = true;

  // desc = set this to ``false`` to disable the special handle of missing value
  bool use_missing = true;

  // desc = set this to ``true`` to treat all zero as missing values (including the unshown values in LibSVM / sparse matrices)
  // desc = set this to ``false`` to use ``na`` for representing missing values
  bool zero_as_missing = false;

  // desc = set this to ``true`` (the default) to tell LightGBM to ignore the features that are unsplittable based on ``min_data_in_leaf``
  // desc = as dataset object is initialized only once and cannot be changed after that, you may need to set this to ``false`` when searching parameters with ``min_data_in_leaf``, otherwise features are filtered by ``min_data_in_leaf`` firstly if you don't reconstruct dataset object
  // desc = **Note**: setting this to ``false`` may slow down the training
  bool feature_pre_filter = true;

  // alias = is_pre_partition
  // desc = used for parallel learning (excluding the ``feature_parallel`` mode)
  // desc = ``true`` if training data are pre-partitioned, and different machines use different partitions
  bool pre_partition = false;

  // alias = two_round_loading, use_two_round_loading
  // desc = set this to ``true`` if data file is too big to fit in memory
  // desc = by default, LightGBM will map data file to memory and load features from memory. This will provide faster data loading speed, but may cause an out-of-memory error when the data file is very big
  // desc = **Note**: works only in case of loading data directly from file
  bool two_round = false;

  // alias = has_header
  // desc = set this to ``true`` if input data has header
  // desc = **Note**: works only in case of loading data directly from file
  bool header = false;

  // type = int or string
  // alias = label
  // desc = used to specify the label column
  // desc = use number for index, e.g. ``label=0`` means column\_0 is the label
  // desc = add a prefix ``name:`` for column name, e.g. ``label=name:is_click``
  // desc = **Note**: works only in case of loading data directly from file
  std::string label_column = "";

  // type = int or string
  // alias = weight
  // desc = used to specify the weight column
  // desc = use number for index, e.g. ``weight=0`` means column\_0 is the weight
  // desc = add a prefix ``name:`` for column name, e.g. ``weight=name:weight``
  // desc = **Note**: works only in case of loading data directly from file
  // desc = **Note**: index starts from ``0`` and it doesn't count the label column when passing type is ``int``, e.g. when label is column\_0, and weight is column\_1, the correct parameter is ``weight=0``
  std::string weight_column = "";

  // type = int or string
  // alias = group, group_id, query_column, query, query_id
  // desc = used to specify the query/group id column
  // desc = use number for index, e.g. ``query=0`` means column\_0 is the query id
  // desc = add a prefix ``name:`` for column name, e.g. ``query=name:query_id``
  // desc = **Note**: works only in case of loading data directly from file
  // desc = **Note**: data should be grouped by query\_id
  // desc = **Note**: index starts from ``0`` and it doesn't count the label column when passing type is ``int``, e.g. when label is column\_0 and query\_id is column\_1, the correct parameter is ``query=0``
  std::string group_column = "";

  // type = multi-int or string
  // alias = ignore_feature, blacklist
  // desc = used to specify some ignoring columns in training
  // desc = use number for index, e.g. ``ignore_column=0,1,2`` means column\_0, column\_1 and column\_2 will be ignored
  // desc = add a prefix ``name:`` for column name, e.g. ``ignore_column=name:c1,c2,c3`` means c1, c2 and c3 will be ignored
  // desc = **Note**: works only in case of loading data directly from file
  // desc = **Note**: index starts from ``0`` and it doesn't count the label column when passing type is ``int``
  // desc = **Note**: despite the fact that specified columns will be completely ignored during the training, they still should have a valid format allowing LightGBM to load file successfully
  std::string ignore_column = "";

  // type = multi-int or string
  // alias = cat_feature, categorical_column, cat_column
  // desc = used to specify categorical features
  // desc = use number for index, e.g. ``categorical_feature=0,1,2`` means column\_0, column\_1 and column\_2 are categorical features
  // desc = add a prefix ``name:`` for column name, e.g. ``categorical_feature=name:c1,c2,c3`` means c1, c2 and c3 are categorical features
  // desc = **Note**: only supports categorical with ``int`` type (not applicable for data represented as pandas DataFrame in Python-package)
  // desc = **Note**: index starts from ``0`` and it doesn't count the label column when passing type is ``int``
  // desc = **Note**: all values should be less than ``Int32.MaxValue`` (2147483647)
  // desc = **Note**: using large values could be memory consuming. Tree decision rule works best when categorical features are presented by consecutive integers starting from zero
  // desc = **Note**: all negative values will be treated as **missing values**
  // desc = **Note**: the output cannot be monotonically constrained with respect to a categorical feature
  std::string categorical_feature = "";

  // desc = path to a ``.json`` file that specifies bin upper bounds for some or all features
  // desc = ``.json`` file should contain an array of objects, each containing the word ``feature`` (integer feature index) and ``bin_upper_bound`` (array of thresholds for binning)
  // desc = see `this file <https://github.com/microsoft/LightGBM/tree/master/examples/regression/forced_bins.json>`__ as an example
  std::string forcedbins_filename = "";
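  // Hypothetical sketch of such a file (structure as described above; the linked
  // example file is the authoritative reference):
  //   [
  //     { "feature": 0, "bin_upper_bound": [0.3, 0.35, 0.4] },
  //     { "feature": 2, "bin_upper_bound": [0.5, 0.75] }
  //   ]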

  // [no-save]
  // alias = is_save_binary, is_save_binary_file
  // desc = if ``true``, LightGBM will save the dataset (including validation data) to a binary file. This speed ups the data loading for the next time
  // desc = **Note**: ``init_score`` is not saved in binary file
  // desc = **Note**: can be used only in CLI version; for language-specific packages you can use the correspondent function
  bool save_binary = false;

  #pragma endregion

  #pragma region Predict Parameters

  // [no-save]
  // desc = used only in ``prediction`` task
  // desc = used to specify from which iteration to start the prediction
  // desc = ``<= 0`` means from the first iteration
  int start_iteration_predict = 0;

  // [no-save]
  // desc = used only in ``prediction`` task
  // desc = used to specify how many trained iterations will be used in prediction
  // desc = ``<= 0`` means no limit
  int num_iteration_predict = -1;

  // [no-save]
  // alias = is_predict_raw_score, predict_rawscore, raw_score
  // desc = used only in ``prediction`` task
  // desc = set this to ``true`` to predict only the raw scores
  // desc = set this to ``false`` to predict transformed scores
  bool predict_raw_score = false;

  // [no-save]
  // alias = is_predict_leaf_index, leaf_index
  // desc = used only in ``prediction`` task
  // desc = set this to ``true`` to predict with leaf index of all trees
  bool predict_leaf_index = false;

  // [no-save]
  // alias = is_predict_contrib, contrib
  // desc = used only in ``prediction`` task
  // desc = set this to ``true`` to estimate `SHAP values <https://arxiv.org/abs/1706.06060>`__, which represent how each feature contributes to each prediction
  // desc = produces ``#features + 1`` values where the last value is the expected value of the model output over the training data
  // desc = **Note**: if you want to get more explanation for your model's predictions using SHAP values like SHAP interaction values, you can install `shap package <https://github.com/slundberg/shap>`__
  // desc = **Note**: unlike the shap package, with ``predict_contrib`` we return a matrix with an extra column, where the last column is the expected value
  bool predict_contrib = false;

  // [no-save]
  // desc = used only in ``prediction`` task
  // desc = control whether or not LightGBM raises an error when you try to predict on data with a different number of features than the training data
  // desc = if ``false`` (the default), a fatal error will be raised if the number of features in the dataset you predict on differs from the number seen during training
  // desc = if ``true``, LightGBM will attempt to predict on whatever data you provide. This is dangerous because you might get incorrect predictions, but you could use it in situations where it is difficult or expensive to generate some features and you are very confident that they were never chosen for splits in the model
  // desc = **Note**: be very careful setting this parameter to ``true``
  bool predict_disable_shape_check = false;

  // [no-save]
  // desc = used only in ``prediction`` task
  // desc = if ``true``, will use early-stopping to speed up the prediction. May affect the accuracy
  bool pred_early_stop = false;

  // [no-save]
  // desc = used only in ``prediction`` task
  // desc = the frequency of checking early-stopping prediction
  int pred_early_stop_freq = 10;

  // [no-save]
  // desc = used only in ``prediction`` task
  // desc = the threshold of margin in early-stopping prediction
  double pred_early_stop_margin = 10.0;

  // [no-save]
  // alias = predict_result, prediction_result, predict_name, prediction_name, pred_name, name_pred
  // desc = used only in ``prediction`` task
  // desc = filename of prediction result
  // desc = **Note**: can be used only in CLI version
  std::string output_result = "LightGBM_predict_result.txt";

  #pragma endregion

  #pragma region Convert Parameters

  // [no-save]
  // desc = used only in ``convert_model`` task
  // desc = only ``cpp`` is supported yet; to convert the model to other languages, consider using the `m2cgen <https://github.com/BayesWitnesses/m2cgen>`__ utility
  // desc = if ``convert_model_language`` is set and ``task=train``, the model will be also converted
  // desc = **Note**: can be used only in CLI version
  std::string convert_model_language = "";

  // [no-save]
  // alias = convert_model_file
  // desc = used only in ``convert_model`` task
  // desc = output filename of converted model
  // desc = **Note**: can be used only in CLI version
  std::string convert_model = "gbdt_prediction.cpp";

  #pragma endregion

  #pragma endregion

  #pragma region Objective Parameters

  // desc = used only in ``rank_xendcg`` objective
  // desc = random seed for objectives, if random process is needed
  int objective_seed = 5;

  // check = >0
  // alias = num_classes
  // desc = used only in ``multi-class`` classification application
  int num_class = 1;

  // alias = unbalance, unbalanced_sets
  // desc = used only in ``binary`` and ``multiclassova`` applications
  // desc = set this to ``true`` if training data are unbalanced
  // desc = **Note**: while enabling this should increase the overall performance metric of your model, it will also result in poor estimates of the individual class probabilities
  // desc = **Note**: this parameter cannot be used at the same time with ``scale_pos_weight``, choose only **one** of them
  bool is_unbalance = false;

  // check = >0.0
  // desc = used only in ``binary`` and ``multiclassova`` applications
  // desc = weight of labels with positive class
  // desc = **Note**: while enabling this should increase the overall performance metric of your model, it will also result in poor estimates of the individual class probabilities
  // desc = **Note**: this parameter cannot be used at the same time with ``is_unbalance``, choose only **one** of them
  double scale_pos_weight = 1.0;

  // check = >0.0
  // desc = used only in ``binary`` and ``multiclassova`` classification and in ``lambdarank`` applications
  // desc = parameter for the sigmoid function
  double sigmoid = 1.0;

  // desc = used only in ``regression``, ``binary``, ``multiclassova`` and ``cross-entropy`` applications
  // desc = adjusts initial score to the mean of labels for faster convergence
  bool boost_from_average = true;

  // desc = used only in ``regression`` application
  // desc = used to fit ``sqrt(label)`` instead of original values and prediction result will be also automatically converted to ``prediction^2``
  // desc = might be useful in case of large-range labels
  bool reg_sqrt = false;

  // check = >0.0
  // desc = used only in ``huber`` and ``quantile`` ``regression`` applications
  // desc = parameter for `Huber loss <https://en.wikipedia.org/wiki/Huber_loss>`__ and `Quantile regression <https://en.wikipedia.org/wiki/Quantile_regression>`__
  double alpha = 0.9;

  // check = >0.0
  // desc = used only in ``fair`` ``regression`` application
  // desc = parameter for `Fair loss <https://www.kaggle.com/c/allstate-claims-severity/discussion/24520>`__
  double fair_c = 1.0;

  // check = >0.0
  // desc = used only in ``poisson`` ``regression`` application
  // desc = parameter for `Poisson regression <https://en.wikipedia.org/wiki/Poisson_regression>`__ to safeguard optimization
  double poisson_max_delta_step = 0.7;

  // check = >=1.0
  // check = <2.0
  // desc = used only in ``tweedie`` ``regression`` application
  // desc = used to control the variance of the tweedie distribution
  // desc = set this closer to ``2`` to shift towards a **Gamma** distribution
  // desc = set this closer to ``1`` to shift towards a **Poisson** distribution
  double tweedie_variance_power = 1.5;

  // check = >0
  // desc = used only in ``lambdarank`` application
  // desc = controls the number of top-results to focus on during training, refer to "truncation level" in the Sec. 3 of `LambdaMART paper <https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/MSR-TR-2010-82.pdf>`__
  // desc = this parameter is closely related to the desirable cutoff ``k`` in the metric **NDCG@k** that we aim at optimizing the ranker for. The optimal setting for this parameter is likely to be slightly higher than ``k`` (e.g., ``k + 3``) to include more pairs of documents to train on, but perhaps not too high to avoid deviating too much from the desired target metric **NDCG@k**
  int lambdarank_truncation_level = 30;

  // desc = used only in ``lambdarank`` application
  // desc = set this to ``true`` to normalize the lambdas for different queries, and improve the performance for unbalanced data
  // desc = set this to ``false`` to enforce the original lambdarank algorithm
  bool lambdarank_norm = true;

  // type = multi-double
  // default = 0,1,3,7,15,31,63,...,2^30-1
  // desc = used only in ``lambdarank`` application
  // desc = relevant gain for labels. For example, the gain of label ``2`` is ``3`` in case of default label gains
  // desc = separate by ``,``
  std::vector<double> label_gain;

  #pragma endregion

  #pragma region Metric Parameters

  // [doc-only]
  // alias = metrics, metric_types
  // default = ""
  // type = multi-enum
  // desc = metric(s) to be evaluated on the evaluation set(s)
  // descl2 = ``""`` (empty string or not specified) means that metric corresponding to specified ``objective`` will be used (this is possible only for pre-defined objective functions, otherwise no evaluation metric will be added)
  // descl2 = ``"None"`` (string, **not** a ``None`` value) means that no metric will be registered, aliases: ``na``, ``null``, ``custom``
  // descl2 = ``l1``, absolute loss, aliases: ``mean_absolute_error``, ``mae``, ``regression_l1``
  // descl2 = ``l2``, square loss, aliases: ``mean_squared_error``, ``mse``, ``regression_l2``, ``regression``
  // descl2 = ``rmse``, root square loss, aliases: ``root_mean_squared_error``, ``l2_root``
  // descl2 = ``quantile``, `Quantile regression <https://en.wikipedia.org/wiki/Quantile_regression>`__
  // descl2 = ``mape``, `MAPE loss <https://en.wikipedia.org/wiki/Mean_absolute_percentage_error>`__, aliases: ``mean_absolute_percentage_error``
  // descl2 = ``huber``, `Huber loss <https://en.wikipedia.org/wiki/Huber_loss>`__
  // descl2 = ``fair``, `Fair loss <https://www.kaggle.com/c/allstate-claims-severity/discussion/24520>`__
  // descl2 = ``poisson``, negative log-likelihood for `Poisson regression <https://en.wikipedia.org/wiki/Poisson_regression>`__
  // descl2 = ``gamma``, negative log-likelihood for **Gamma** regression
  // descl2 = ``gamma_deviance``, residual deviance for **Gamma** regression
  // descl2 = ``tweedie``, negative log-likelihood for **Tweedie** regression
  // descl2 = ``ndcg``, `NDCG <https://en.wikipedia.org/wiki/Discounted_cumulative_gain#Normalized_DCG>`__, aliases: ``lambdarank``, ``rank_xendcg``, ``xendcg``, ``xe_ndcg``, ``xe_ndcg_mart``, ``xendcg_mart``
  // descl2 = ``map``, `MAP <https://makarandtapaswi.wordpress.com/2012/07/02/intuition-behind-average-precision-and-map/>`__, aliases: ``mean_average_precision``
  // descl2 = ``auc``, `AUC <https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve>`__
  // descl2 = ``average_precision``, `average precision score <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.average_precision_score.html>`__
  // descl2 = ``binary_logloss``, `log loss <https://en.wikipedia.org/wiki/Cross_entropy>`__, aliases: ``binary``
  // descl2 = ``binary_error``, for one sample: ``0`` for correct classification, ``1`` for incorrect classification
  // descl2 = ``auc_mu``, `AUC-mu <http://proceedings.mlr.press/v97/kleiman19a/kleiman19a.pdf>`__
  // descl2 = ``multi_logloss``, log loss for multi-class classification, aliases: ``multiclass``, ``softmax``, ``multiclassova``, ``multiclass_ova``, ``ova``, ``ovr``
  // descl2 = ``multi_error``, error rate for multi-class classification
  // descl2 = ``cross_entropy``, cross-entropy (with optional linear weights), aliases: ``xentropy``
  // descl2 = ``cross_entropy_lambda``, "intensity-weighted" cross-entropy, aliases: ``xentlambda``
  // descl2 = ``kullback_leibler``, `Kullback-Leibler divergence <https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence>`__, aliases: ``kldiv``
  // desc = support multiple metrics, separated by ``,``
  std::vector<std::string> metric;
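  // A minimal sketch (kept as a comment so this header's behaviour is unchanged) of how
  // multiple metrics could be supplied through the parameter map consumed by ``Config::Set``;
  // the chosen metric names are only an example:
  //
  //   std::unordered_map<std::string, std::string> params;
  //   params["metric"] = "l2,auc";  // two metrics, separated by ','
  //   Config config;
  //   config.Set(params);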

  // [no-save]
  // check = >0
  // alias = output_freq
  // desc = frequency for metric output
  // desc = **Note**: can be used only in CLI version
  int metric_freq = 1;

  // [no-save]
  // alias = training_metric, is_training_metric, train_metric
  // desc = set this to ``true`` to output metric result over training dataset
  // desc = **Note**: can be used only in CLI version
  bool is_provide_training_metric = false;

  // type = multi-int
  // default = 1,2,3,4,5
  // alias = ndcg_eval_at, ndcg_at, map_eval_at, map_at
  // desc = used only with ``ndcg`` and ``map`` metrics
  // desc = `NDCG <https://en.wikipedia.org/wiki/Discounted_cumulative_gain#Normalized_DCG>`__ and `MAP <https://makarandtapaswi.wordpress.com/2012/07/02/intuition-behind-average-precision-and-map/>`__ evaluation positions, separated by ``,``
  std::vector<int> eval_at;
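  // Illustrative example (comment only): ``eval_at=1,3,5`` would report NDCG@1, NDCG@3 and
  // NDCG@5 (or MAP at the same positions) on each evaluation set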

  // check = >0
  // desc = used only with ``multi_error`` metric
  // desc = threshold for top-k multi-error metric
  // desc = the error on each sample is ``0`` if the true class is among the top ``multi_error_top_k`` predictions, and ``1`` otherwise
  // descl2 = more precisely, the error on a sample is ``0`` if there are at least ``num_classes - multi_error_top_k`` predictions strictly less than the prediction on the true class
  // desc = when ``multi_error_top_k=1`` this is equivalent to the usual multi-error metric
  int multi_error_top_k = 1;
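  // Worked example (illustrative comment only): with 4 classes, ``multi_error_top_k=2`` and
  // predicted scores (0.3, 0.1, 0.4, 0.2) for a sample whose true class is 0, the error is
  // ``0`` because at least ``4 - 2 = 2`` predictions (0.1 and 0.2) are strictly less than the
  // prediction on the true class; with ``multi_error_top_k=1`` the same sample counts as an error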

  // type = multi-double
  // default = None
  // desc = used only with ``auc_mu`` metric
  // desc = list representing flattened matrix (in row-major order) giving loss weights for classification errors
  // desc = list should have ``n * n`` elements, where ``n`` is the number of classes
  // desc = the matrix co-ordinate ``[i, j]`` should correspond to the ``i * n + j``-th element of the list
  // desc = if not specified, will use equal weights for all classes
  std::vector<double> auc_mu_weights;
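  // Illustrative example (comment only): for ``n = 3`` classes, ``auc_mu_weights=0,1,1,1,0,1,1,1,0``
  // is read in row-major order as the 3x3 matrix with ``0`` on the diagonal and ``1`` elsewhere,
  // i.e. an equal penalty for every kind of misclassification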

  #pragma endregion

  #pragma region Network Parameters

  // check = >0
  // alias = num_machine
  // desc = the number of machines for parallel learning application
  // desc = this parameter needs to be set in both **socket** and **mpi** versions
  int num_machines = 1;

  // check = >0
  // alias = local_port, port
  // desc = TCP listen port for local machines
  // desc = **Note**: don't forget to allow this port in firewall settings before training
  int local_listen_port = 12400;

  // check = >0
  // desc = socket time-out in minutes
  int time_out = 120;

  // alias = machine_list_file, machine_list, mlist
  // desc = path of file that lists machines for this parallel learning application
  // desc = each line contains one IP and one port for one machine. The format is ``ip port`` (space as a separator)
  std::string machine_list_filename = "";

  // alias = workers, nodes
  // desc = list of machines in the following format: ``ip1:port1,ip2:port2``
  std::string machines = "";
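  // Illustrative example (comment only), with made-up addresses for two training machines:
  // ``machines=192.168.0.1:12400,192.168.0.2:12400``, or an equivalent machine list file
  // containing the two lines ``192.168.0.1 12400`` and ``192.168.0.2 12400``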

  #pragma endregion

  #pragma region GPU Parameters

  // desc = OpenCL platform ID. Usually each GPU vendor exposes one OpenCL platform
  // desc = ``-1`` means the system-wide default platform
  // desc = **Note**: refer to `GPU Targets <./GPU-Targets.rst#query-opencl-devices-in-your-system>`__ for more details
  int gpu_platform_id = -1;

  // desc = OpenCL device ID in the specified platform. Each GPU in the selected platform has a unique device ID
  // desc = ``-1`` means the default device in the selected platform
  // desc = **Note**: refer to `GPU Targets <./GPU-Targets.rst#query-opencl-devices-in-your-system>`__ for more details
  int gpu_device_id = -1;
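  // Illustrative example (comment only): to train on the second device of the first OpenCL
  // platform, one would pass ``gpu_platform_id=0`` and ``gpu_device_id=1`` (together with the
  // GPU device type selected elsewhere in this config)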

  // desc = set this to ``true`` to use double precision math on GPU (by default single precision is used in OpenCL implementation and double precision is used in CUDA implementation)
  bool gpu_use_dp = false;

  // check = >0
  // desc = number of GPUs
  // desc = **Note**: can be used only in CUDA implementation
  int num_gpu = 1;

  #pragma endregion

  #pragma endregion

  // progress reporting interval, in bytes, when loading data from file (10 GiB)
  size_t file_load_progress_interval_bytes = size_t(10) * 1024 * 1024 * 1024;

  // internal flags describing the selected parallel (distributed) learning mode; set while processing parameters, not user-facing
  bool is_parallel = false;
  bool is_data_based_parallel = false;
  LIGHTGBM_EXPORT void Set(const std::unordered_map<std::string, std::string>& params);
  static const std::unordered_map<std::string, std::string>& alias_table();
  static const std::unordered_set<std::string>& parameter_set();
  std::vector<std::vector<double>> auc_mu_weights_matrix;
  std::vector<std::vector<int>> interaction_constraints_vector;

 private:
  void CheckParamConflict();
  void GetMembersFromString(const std::unordered_map<std::string, std::string>& params);
  std::string SaveMembersToString() const;
  void GetAucMuWeights();
  void GetInteractionConstraints();
};

inline bool Config::GetString(
  const std::unordered_map<std::string, std::string>& params,
  const std::string& name, std::string* out) {
  if (params.count(name) > 0 && !params.at(name).empty()) {
    *out = params.at(name);
    return true;
  }
  return false;
}

inline bool Config::GetInt(
  const std::unordered_map<std::string, std::string>& params,
  const std::string& name, int* out) {
  if (params.count(name) > 0 && !params.at(name).empty()) {
    if (!Common::AtoiAndCheck(params.at(name).c_str(), out)) {
      Log::Fatal("Parameter %s should be of type int, got \"%s\"",
                 name.c_str(), params.at(name).c_str());
    }
    return true;
  }
  return false;
}

inline bool Config::GetDouble(
  const std::unordered_map<std::string, std::string>& params,
  const std::string& name, double* out) {
  if (params.count(name) > 0 && !params.at(name).empty()) {
    if (!Common::AtofAndCheck(params.at(name).c_str(), out)) {
      Log::Fatal("Parameter %s should be of type double, got \"%s\"",
                 name.c_str(), params.at(name).c_str());
    }
    return true;
  }
  return false;
}

inline bool Config::GetBool(
  const std::unordered_map<std::string, std::string>& params,
  const std::string& name, bool* out) {
  if (params.count(name) > 0 && !params.at(name).empty()) {
    std::string value = params.at(name);
    std::transform(value.begin(), value.end(), value.begin(), Common::tolower);
    if (value == std::string("false") || value == std::string("-")) {
      *out = false;
    } else if (value == std::string("true") || value == std::string("+")) {
      *out = true;
    } else {
      Log::Fatal("Parameter %s should be \"true\"/\"+\" or \"false\"/\"-\", got \"%s\"",
                 name.c_str(), params.at(name).c_str());
    }
    return true;
  }
  return false;
}

struct ParameterAlias {
  static void KeyAliasTransform(std::unordered_map<std::string, std::string>* params) {
    std::unordered_map<std::string, std::string> tmp_map;
    for (const auto& pair : *params) {
      auto alias = Config::alias_table().find(pair.first);
      if (alias != Config::alias_table().end()) {  // found alias
        auto alias_set = tmp_map.find(alias->second);
        if (alias_set != tmp_map.end()) {  // alias already set
                                           // set priority by length & alphabetically to ensure reproducible behavior
          if (alias_set->second.size() < pair.first.size() ||
            (alias_set->second.size() == pair.first.size() && alias_set->second < pair.first)) {
            Log::Warning("%s is set with %s=%s, %s=%s will be ignored. Current value: %s=%s",
                         alias->second.c_str(), alias_set->second.c_str(), params->at(alias_set->second).c_str(),
                         pair.first.c_str(), pair.second.c_str(), alias->second.c_str(), params->at(alias_set->second).c_str());
          } else {
            Log::Warning("%s is set with %s=%s, will be overridden by %s=%s. Current value: %s=%s",
                         alias->second.c_str(), alias_set->second.c_str(), params->at(alias_set->second).c_str(),
                         pair.first.c_str(), pair.second.c_str(), alias->second.c_str(), pair.second.c_str());
            tmp_map[alias->second] = pair.first;
          }
        } else {  // alias not set
          tmp_map.emplace(alias->second, pair.first);
        }
      } else if (Config::parameter_set().find(pair.first) == Config::parameter_set().end()) {
        Log::Warning("Unknown parameter: %s", pair.first.c_str());
      }
    }
    for (const auto& pair : tmp_map) {
      auto alias = params->find(pair.first);
      if (alias == params->end()) {  // not found
        params->emplace(pair.first, params->at(pair.second));
        params->erase(pair.second);
      } else {
        Log::Warning("%s is set=%s, %s=%s will be ignored. Current value: %s=%s",
                     pair.first.c_str(), alias->second.c_str(), pair.second.c_str(), params->at(pair.second).c_str(),
                     pair.first.c_str(), alias->second.c_str());
      }
    }
  }
};
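// A minimal usage sketch (comment only), assuming the alias table maps the hypothetical
// alias "num_iteration" to the canonical key "num_iterations":
//
//   std::unordered_map<std::string, std::string> params =
//       {{"num_iteration", "100"}, {"learning_rate", "0.05"}};
//   ParameterAlias::KeyAliasTransform(&params);
//   // params now holds {"num_iterations", "100"} and {"learning_rate", "0.05"};
//   // if both an alias and its canonical key had been supplied, the canonical key
//   // would be kept and a warning logged for the ignored alias.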

inline std::string ParseObjectiveAlias(const std::string& type) {
  if (type == std::string("regression") || type == std::string("regression_l2")
    || type == std::string("mean_squared_error") || type == std::string("mse") || type == std::string("l2")
    || type == std::string("l2_root") || type == std::string("root_mean_squared_error") || type == std::string("rmse")) {
    return "regression";
  } else if (type == std::string("regression_l1") || type == std::string("mean_absolute_error")
    || type == std::string("l1") || type == std::string("mae")) {
    return "regression_l1";
  } else if (type == std::string("multiclass") || type == std::string("softmax")) {
    return "multiclass";
  } else if (type == std::string("multiclassova") || type == std::string("multiclass_ova") || type == std::string("ova") || type == std::string("ovr")) {
    return "multiclassova";
  } else if (type == std::string("xentropy") || type == std::string("cross_entropy")) {
    return "cross_entropy";
  } else if (type == std::string("xentlambda") || type == std::string("cross_entropy_lambda")) {
    return "cross_entropy_lambda";
  } else if (type == std::string("mean_absolute_percentage_error") || type == std::string("mape")) {
    return "mape";
  } else if (type == std::string("rank_xendcg") || type == std::string("xendcg") || type == std::string("xe_ndcg")
             || type == std::string("xe_ndcg_mart") || type == std::string("xendcg_mart")) {
    return "rank_xendcg";
  } else if (type == std::string("none") || type == std::string("null") || type == std::string("custom") || type == std::string("na")) {
    return "custom";
  }
  return type;
}

inline std::string ParseMetricAlias(const std::string& type) {
  if (type == std::string("regression") || type == std::string("regression_l2") || type == std::string("l2") || type == std::string("mean_squared_error") || type == std::string("mse")) {
    return "l2";
  } else if (type == std::string("l2_root") || type == std::string("root_mean_squared_error") || type == std::string("rmse")) {
    return "rmse";
  } else if (type == std::string("regression_l1") || type == std::string("l1") || type == std::string("mean_absolute_error") || type == std::string("mae")) {
    return "l1";
  } else if (type == std::string("binary_logloss") || type == std::string("binary")) {
    return "binary_logloss";
  } else if (type == std::string("ndcg") || type == std::string("lambdarank") || type == std::string("rank_xendcg")
             || type == std::string("xendcg") || type == std::string("xe_ndcg") || type == std::string("xe_ndcg_mart") || type == std::string("xendcg_mart")) {
    return "ndcg";
  } else if (type == std::string("map") || type == std::string("mean_average_precision")) {
    return "map";
  } else if (type == std::string("multi_logloss") || type == std::string("multiclass") || type == std::string("softmax") || type == std::string("multiclassova") || type == std::string("multiclass_ova") || type == std::string("ova") || type == std::string("ovr")) {
    return "multi_logloss";
  } else if (type == std::string("xentropy") || type == std::string("cross_entropy")) {
    return "cross_entropy";
  } else if (type == std::string("xentlambda") || type == std::string("cross_entropy_lambda")) {
    return "cross_entropy_lambda";
  } else if (type == std::string("kldiv") || type == std::string("kullback_leibler")) {
    return "kullback_leibler";
  } else if (type == std::string("mean_absolute_percentage_error") || type == std::string("mape")) {
    return "mape";
  } else if (type == std::string("auc_mu")) {
    return "auc_mu";
  } else if (type == std::string("none") || type == std::string("null") || type == std::string("custom") || type == std::string("na")) {
    return "custom";
  }
  return type;
}

}   // namespace LightGBM

#endif   // LIGHTGBM_CONFIG_H_