/*!
 * Copyright (c) 2016 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for license information.
 *
 * \note
 * - desc and descl2 fields must be written in reStructuredText format;
 * - nested sections can be placed only at the bottom of parent's section;
 * - [doc-only] tag indicates that only documentation for this param should be generated and all other actions are performed manually;
 * - [no-save] tag indicates that this param should not be saved into a model text representation.
 */
#ifndef LIGHTGBM_CONFIG_H_
#define LIGHTGBM_CONFIG_H_

#include <LightGBM/export.h>
#include <LightGBM/meta.h>
#include <LightGBM/utils/common.h>
#include <LightGBM/utils/log.h>

#include <string>
#include <algorithm>
#include <memory>
#include <unordered_map>
#include <unordered_set>
#include <vector>

namespace LightGBM {

/*! \brief Types of tasks */
enum TaskType {
  kTrain, kPredict, kConvertModel, KRefitTree, kSaveBinary
};
const int kDefaultNumLeaves = 31;

struct Config {
 public:
  std::string ToString() const;
  /*!
  * \brief Get string value by specific name of key
  * \param params Store the key and value for params
  * \param name Name of key
  * \param out Value will be assigned to out if key exists
  * \return True if key exists
  */
  inline static bool GetString(
    const std::unordered_map<std::string, std::string>& params,
    const std::string& name, std::string* out);

  /*!
  * \brief Get int value by specific name of key
  * \param params Store the key and value for params
  * \param name Name of key
  * \param out Value will be assigned to out if key exists
  * \return True if key exists
  */
  inline static bool GetInt(
    const std::unordered_map<std::string, std::string>& params,
    const std::string& name, int* out);

  /*!
  * \brief Get double value by specific name of key
  * \param params Store the key and value for params
  * \param name Name of key
  * \param out Value will be assigned to out if key exists
  * \return True if key exists
  */
  inline static bool GetDouble(
    const std::unordered_map<std::string, std::string>& params,
    const std::string& name, double* out);

  /*!
  * \brief Get bool value by specific name of key
  * \param params Store the key and value for params
  * \param name Name of key
  * \param out Value will be assigned to out if key exists
  * \return True if key exists
  */
  inline static bool GetBool(
    const std::unordered_map<std::string, std::string>& params,
    const std::string& name, bool* out);

  static void KV2Map(std::unordered_map<std::string, std::string>* params, const char* kv);
  static std::unordered_map<std::string, std::string> Str2Map(const char* parameters);
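
  // A minimal usage sketch (illustrative only, not part of the official docs):
  // parse a space-separated parameter string, then read a typed value back.
  //
  //   auto params = Config::Str2Map("task=train num_leaves=63");
  //   int num_leaves = 0;
  //   if (Config::GetInt(params, "num_leaves", &num_leaves)) {
  //     // num_leaves is now 63
  //   }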

  #pragma region Parameters

  #pragma region Core Parameters

  // [no-save]
  // [doc-only]
  // alias = config_file
  // desc = path of config file
  // desc = **Note**: can be used only in CLI version
  std::string config = "";
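
  // An illustrative example (hypothetical file contents): the CLI reads
  // ``key = value`` pairs from the config file, e.g.
  //
  //   $ ./lightgbm config=train.conf
  //
  //   # train.conf
  //   task = train
  //   objective = binary
  //   data = train.txt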

  // [no-save]
  // [doc-only]
  // type = enum
  // default = train
  // options = train, predict, convert_model, refit
  // alias = task_type
  // desc = ``train``, for training, aliases: ``training``
  // desc = ``predict``, for prediction, aliases: ``prediction``, ``test``
  // desc = ``convert_model``, for converting model file into if-else format, see more information in `Convert Parameters <#convert-parameters>`__
  // desc = ``refit``, for refitting existing models with new data, aliases: ``refit_tree``
  // desc = ``save_binary``, load train (and validation) data then save dataset to binary file. Typical usage: ``save_binary`` first, then run multiple ``train`` tasks in parallel using the saved binary file
  // desc = **Note**: can be used only in CLI version; for language-specific packages you can use the correspondent functions
  TaskType task = TaskType::kTrain;

  // [doc-only]
  // type = enum
  // options = regression, regression_l1, huber, fair, poisson, quantile, mape, gamma, tweedie, binary, multiclass, multiclassova, cross_entropy, cross_entropy_lambda, lambdarank, rank_xendcg
  // alias = objective_type, app, application, loss
  // desc = regression application
  // descl2 = ``regression``, L2 loss, aliases: ``regression_l2``, ``l2``, ``mean_squared_error``, ``mse``, ``l2_root``, ``root_mean_squared_error``, ``rmse``
  // descl2 = ``regression_l1``, L1 loss, aliases: ``l1``, ``mean_absolute_error``, ``mae``
  // descl2 = ``huber``, `Huber loss <https://en.wikipedia.org/wiki/Huber_loss>`__
  // descl2 = ``fair``, `Fair loss <https://www.kaggle.com/c/allstate-claims-severity/discussion/24520>`__
  // descl2 = ``poisson``, `Poisson regression <https://en.wikipedia.org/wiki/Poisson_regression>`__
  // descl2 = ``quantile``, `Quantile regression <https://en.wikipedia.org/wiki/Quantile_regression>`__
  // descl2 = ``mape``, `MAPE loss <https://en.wikipedia.org/wiki/Mean_absolute_percentage_error>`__, aliases: ``mean_absolute_percentage_error``
  // descl2 = ``gamma``, Gamma regression with log-link. It might be useful, e.g., for modeling insurance claims severity, or for any target that might be `gamma-distributed <https://en.wikipedia.org/wiki/Gamma_distribution#Occurrence_and_applications>`__
  // descl2 = ``tweedie``, Tweedie regression with log-link. It might be useful, e.g., for modeling total loss in insurance, or for any target that might be `tweedie-distributed <https://en.wikipedia.org/wiki/Tweedie_distribution#Occurrence_and_applications>`__
  // desc = binary classification application
  // descl2 = ``binary``, binary `log loss <https://en.wikipedia.org/wiki/Cross_entropy>`__ classification (or logistic regression)
  // descl2 = requires labels in {0, 1}; see ``cross-entropy`` application for general probability labels in [0, 1]
  // desc = multi-class classification application
  // descl2 = ``multiclass``, `softmax <https://en.wikipedia.org/wiki/Softmax_function>`__ objective function, aliases: ``softmax``
  // descl2 = ``multiclassova``, `One-vs-All <https://en.wikipedia.org/wiki/Multiclass_classification#One-vs.-rest>`__ binary objective function, aliases: ``multiclass_ova``, ``ova``, ``ovr``
  // descl2 = ``num_class`` should be set as well
  // desc = cross-entropy application
  // descl2 = ``cross_entropy``, objective function for cross-entropy (with optional linear weights), aliases: ``xentropy``
  // descl2 = ``cross_entropy_lambda``, alternative parameterization of cross-entropy, aliases: ``xentlambda``
  // descl2 = label is anything in interval [0, 1]
  // desc = ranking application
  // descl2 = ``lambdarank``, `lambdarank <https://papers.nips.cc/paper/2971-learning-to-rank-with-nonsmooth-cost-functions.pdf>`__ objective. `label_gain <#label_gain>`__ can be used to set the gain (weight) of ``int`` label and all values in ``label`` must be smaller than the number of elements in ``label_gain``
  // descl2 = ``rank_xendcg``, `XE_NDCG_MART <https://arxiv.org/abs/1911.09798>`__ ranking objective function, aliases: ``xendcg``, ``xe_ndcg``, ``xe_ndcg_mart``, ``xendcg_mart``
  // descl2 = ``rank_xendcg`` is faster than ``lambdarank`` and achieves similar performance
  // descl2 = label should be ``int`` type, and a larger number represents higher relevance (e.g. 0:bad, 1:fair, 2:good, 3:perfect)
  std::string objective = "regression";

  // [doc-only]
  // type = enum
  // alias = boosting_type, boost
  // options = gbdt, rf, dart, goss
  // desc = ``gbdt``, traditional Gradient Boosting Decision Tree, aliases: ``gbrt``
  // desc = ``rf``, Random Forest, aliases: ``random_forest``
  // desc = ``dart``, `Dropouts meet Multiple Additive Regression Trees <https://arxiv.org/abs/1505.01866>`__
  // desc = ``goss``, Gradient-based One-Side Sampling
  // descl2 = **Note**: internally, LightGBM uses ``gbdt`` mode for the first ``1 / learning_rate`` iterations
  std::string boosting = "gbdt";

  // alias = train, train_data, train_data_file, data_filename
  // desc = path of training data, LightGBM will train from this data
  // desc = **Note**: can be used only in CLI version
  std::string data = "";

  // alias = test, valid_data, valid_data_file, test_data, test_data_file, valid_filenames
  // default = ""
  // desc = path(s) of validation/test data, LightGBM will output metrics for these data
  // desc = support multiple validation data, separated by ``,``
  // desc = **Note**: can be used only in CLI version
  std::vector<std::string> valid;

  // alias = num_iteration, n_iter, num_tree, num_trees, num_round, num_rounds, num_boost_round, n_estimators, max_iter
  // check = >=0
  // desc = number of boosting iterations
  // desc = **Note**: internally, LightGBM constructs ``num_class * num_iterations`` trees for multi-class classification problems
  int num_iterations = 100;
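
  // Worked example of the note above: with ``num_class=3`` and
  // ``num_iterations=100``, LightGBM builds 3 * 100 = 300 trees in total.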

  // alias = shrinkage_rate, eta
  // check = >0.0
  // desc = shrinkage rate
  // desc = in ``dart``, it also affects the normalization weights of dropped trees
  double learning_rate = 0.1;

  // default = 31
  // alias = num_leaf, max_leaves, max_leaf, max_leaf_nodes
  // check = >1
  // check = <=131072
  // desc = max number of leaves in one tree
  int num_leaves = kDefaultNumLeaves;

  // [doc-only]
  // type = enum
  // options = serial, feature, data, voting
  // alias = tree, tree_type, tree_learner_type
  // desc = ``serial``, single machine tree learner
  // desc = ``feature``, feature parallel tree learner, aliases: ``feature_parallel``
  // desc = ``data``, data parallel tree learner, aliases: ``data_parallel``
  // desc = ``voting``, voting parallel tree learner, aliases: ``voting_parallel``
  // desc = refer to `Distributed Learning Guide <./Parallel-Learning-Guide.rst>`__ to get more details
  std::string tree_learner = "serial";

  // alias = num_thread, nthread, nthreads, n_jobs
  // desc = number of threads for LightGBM
  // desc = ``0`` means default number of threads in OpenMP
  // desc = for the best speed, set this to the number of **real CPU cores**, not the number of threads (most CPUs use `hyper-threading <https://en.wikipedia.org/wiki/Hyper-threading>`__ to generate 2 threads per CPU core)
  // desc = do not set it too large if your dataset is small (for instance, do not use 64 threads for a dataset with 10,000 rows)
  // desc = be aware that a task manager or any similar CPU monitoring tool might report cores not being fully utilized. **This is normal**
  // desc = for distributed learning, do not use all CPU cores because this will cause poor performance for the network communication
  // desc = **Note**: please **don't** change this during training, especially when running multiple jobs simultaneously by external packages, otherwise it may cause undesirable errors
  int num_threads = 0;

  // [doc-only]
  // type = enum
  // options = cpu, gpu, cuda
  // alias = device
  // desc = device for the tree learning; you can use GPU to achieve faster learning
  // desc = **Note**: it is recommended to use a smaller ``max_bin`` (e.g. 63) to get a better speedup
  // desc = **Note**: for faster speed, GPU uses 32-bit floating point to sum up by default, so this may affect the accuracy for some tasks. You can set ``gpu_use_dp=true`` to enable 64-bit floating point, but it will slow down the training
  // desc = **Note**: refer to `Installation Guide <./Installation-Guide.rst#build-gpu-version>`__ to build LightGBM with GPU support
  std::string device_type = "cpu";

  // [doc-only]
  // alias = random_seed, random_state
  // default = None
  // desc = this seed is used to generate other seeds, e.g. ``data_random_seed``, ``feature_fraction_seed``, etc.
  // desc = by default, this seed is unused in favor of default values of other seeds
  // desc = this seed has lower priority in comparison with other seeds, which means that it will be overridden, if you set other seeds explicitly
  int seed = 0;

  // desc = used only with ``cpu`` device type
  // desc = setting this to ``true`` should ensure stable results when using the same data and the same parameters (even with different ``num_threads``)
  // desc = when you use different seeds, different LightGBM versions, binaries compiled by different compilers, or different systems, the results are expected to differ
  // desc = you can `raise issues <https://github.com/microsoft/LightGBM/issues>`__ in the LightGBM GitHub repo when you encounter unstable results
  // desc = **Note**: setting this to ``true`` may slow down the training
  // desc = **Note**: to avoid potential instability due to numerical issues, please set ``force_col_wise=true`` or ``force_row_wise=true`` when setting ``deterministic=true``
  bool deterministic = false;

  #pragma endregion

  #pragma region Learning Control Parameters

  // desc = used only with ``cpu`` device type
  // desc = set this to ``true`` to force col-wise histogram building
  // desc = enabling this is recommended when:
  // descl2 = the number of columns is large, or the total number of bins is large
  // descl2 = ``num_threads`` is large, e.g. ``> 20``
  // descl2 = you want to reduce memory cost
  // desc = **Note**: when both ``force_col_wise`` and ``force_row_wise`` are ``false``, LightGBM will first try them both, and then use the faster one. To remove the testing overhead, set the faster one to ``true`` manually
  // desc = **Note**: this parameter cannot be used at the same time with ``force_row_wise``, choose only one of them
  bool force_col_wise = false;

  // desc = used only with ``cpu`` device type
  // desc = set this to ``true`` to force row-wise histogram building
  // desc = enabling this is recommended when:
  // descl2 = the number of data points is large, and the total number of bins is relatively small
  // descl2 = ``num_threads`` is relatively small, e.g. ``<= 16``
  // descl2 = you want to use small ``bagging_fraction`` or ``goss`` boosting to speed up
  // desc = **Note**: setting this to ``true`` will double the memory cost for the Dataset object. If you do not have enough memory, you can try setting ``force_col_wise=true``
  // desc = **Note**: when both ``force_col_wise`` and ``force_row_wise`` are ``false``, LightGBM will first try them both, and then use the faster one. To remove the testing overhead, set the faster one to ``true`` manually
  // desc = **Note**: this parameter cannot be used at the same time with ``force_col_wise``, choose only one of them
  bool force_row_wise = false;

  // alias = hist_pool_size
  // desc = max cache size in MB for historical histogram
  // desc = ``< 0`` means no limit
  double histogram_pool_size = -1.0;

  // desc = limit the max depth for tree model. This is used to deal with over-fitting when ``#data`` is small. Tree still grows leaf-wise
  // desc = ``<= 0`` means no limit
  int max_depth = -1;
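
  // Illustrative interplay with ``num_leaves``: a tree of depth ``d`` has at
  // most ``2^d`` leaves, so with ``max_depth=6`` any ``num_leaves`` value
  // above 64 has no additional effect.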

  // alias = min_data_per_leaf, min_data, min_child_samples, min_samples_leaf
  // check = >=0
  // desc = minimal number of data in one leaf. Can be used to deal with over-fitting
  // desc = **Note**: this is an approximation based on the Hessian, so occasionally you may observe splits which produce leaf nodes that have less than this many observations
  int min_data_in_leaf = 20;

  // alias = min_sum_hessian_per_leaf, min_sum_hessian, min_hessian, min_child_weight
  // check = >=0.0
  // desc = minimal sum hessian in one leaf. Like ``min_data_in_leaf``, it can be used to deal with over-fitting
  double min_sum_hessian_in_leaf = 1e-3;

  // alias = sub_row, subsample, bagging
  // check = >0.0
  // check = <=1.0
  // desc = like ``feature_fraction``, but this will randomly select part of data without resampling
  // desc = can be used to speed up training
  // desc = can be used to deal with over-fitting
  // desc = **Note**: to enable bagging, ``bagging_freq`` should be set to a non-zero value as well
  double bagging_fraction = 1.0;

  // alias = pos_sub_row, pos_subsample, pos_bagging
  // check = >0.0
  // check = <=1.0
  // desc = used only in ``binary`` application
  // desc = used for imbalanced binary classification problem, will randomly sample ``#pos_samples * pos_bagging_fraction`` positive samples in bagging
  // desc = should be used together with ``neg_bagging_fraction``
  // desc = set this to ``1.0`` to disable
  // desc = **Note**: to enable this, you need to set ``bagging_freq`` and ``neg_bagging_fraction`` as well
  // desc = **Note**: if both ``pos_bagging_fraction`` and ``neg_bagging_fraction`` are set to ``1.0``, balanced bagging is disabled
  // desc = **Note**: if balanced bagging is enabled, ``bagging_fraction`` will be ignored
  double pos_bagging_fraction = 1.0;

  // alias = neg_sub_row, neg_subsample, neg_bagging
  // check = >0.0
  // check = <=1.0
  // desc = used only in ``binary`` application
  // desc = used for imbalanced binary classification problem, will randomly sample ``#neg_samples * neg_bagging_fraction`` negative samples in bagging
  // desc = should be used together with ``pos_bagging_fraction``
  // desc = set this to ``1.0`` to disable
  // desc = **Note**: to enable this, you need to set ``bagging_freq`` and ``pos_bagging_fraction`` as well
  // desc = **Note**: if both ``pos_bagging_fraction`` and ``neg_bagging_fraction`` are set to ``1.0``, balanced bagging is disabled
  // desc = **Note**: if balanced bagging is enabled, ``bagging_fraction`` will be ignored
  double neg_bagging_fraction = 1.0;

  // alias = subsample_freq
  // desc = frequency for bagging
  // desc = ``0`` means disable bagging; ``k`` means perform bagging at every ``k`` iteration. Every ``k``-th iteration, LightGBM will randomly select ``bagging_fraction * 100 %`` of the data to use for the next ``k`` iterations
  // desc = **Note**: to enable bagging, ``bagging_fraction`` should be set to a value smaller than ``1.0`` as well
  int bagging_freq = 0;
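
  // A short example combining the two notes above: the parameter string
  //
  //   bagging_fraction=0.8 bagging_freq=5
  //
  // re-samples 80% of the data once every 5 iterations; leaving either
  // parameter at its default keeps bagging disabled.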

  // alias = bagging_fraction_seed
  // desc = random seed for bagging
  int bagging_seed = 3;

  // alias = sub_feature, colsample_bytree
  // check = >0.0
  // check = <=1.0
  // desc = LightGBM will randomly select a subset of features on each iteration (tree) if ``feature_fraction`` is smaller than ``1.0``. For example, if you set it to ``0.8``, LightGBM will select 80% of features before training each tree
  // desc = can be used to speed up training
  // desc = can be used to deal with over-fitting
  double feature_fraction = 1.0;

  // alias = sub_feature_bynode, colsample_bynode
  // check = >0.0
  // check = <=1.0
  // desc = LightGBM will randomly select a subset of features on each tree node if ``feature_fraction_bynode`` is smaller than ``1.0``. For example, if you set it to ``0.8``, LightGBM will select 80% of features at each tree node
  // desc = can be used to deal with over-fitting
  // desc = **Note**: unlike ``feature_fraction``, this cannot speed up training
  // desc = **Note**: if both ``feature_fraction`` and ``feature_fraction_bynode`` are smaller than ``1.0``, the final fraction of each node is ``feature_fraction * feature_fraction_bynode``
  double feature_fraction_bynode = 1.0;
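
  // Worked example of the note above: ``feature_fraction=0.8`` together with
  // ``feature_fraction_bynode=0.5`` gives an effective per-node fraction of
  // 0.8 * 0.5 = 0.4 of the features.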

  // desc = random seed for ``feature_fraction``
  int feature_fraction_seed = 2;

  // alias = extra_tree
  // desc = use extremely randomized trees
  // desc = if set to ``true``, when evaluating node splits LightGBM will check only one randomly-chosen threshold for each feature
  // desc = can be used to speed up training
  // desc = can be used to deal with over-fitting
  bool extra_trees = false;

  // desc = random seed for selecting thresholds when ``extra_trees`` is true
  int extra_seed = 6;

  // alias = early_stopping_rounds, early_stopping, n_iter_no_change
  // desc = will stop training if one metric of one validation data doesn't improve in the last ``early_stopping_round`` rounds
  // desc = ``<= 0`` means disable
  // desc = can be used to speed up training
  int early_stopping_round = 0;

  // desc = LightGBM allows you to provide multiple evaluation metrics. Set this to ``true`` if you want to use only the first metric for early stopping
  bool first_metric_only = false;

  // alias = max_tree_output, max_leaf_output
  // desc = used to limit the max output of tree leaves
  // desc = ``<= 0`` means no constraint
  // desc = the final max output of leaves is ``learning_rate * max_delta_step``
  double max_delta_step = 0.0;

  // alias = reg_alpha, l1_regularization
  // check = >=0.0
  // desc = L1 regularization
  double lambda_l1 = 0.0;

  // alias = reg_lambda, lambda, l2_regularization
  // check = >=0.0
  // desc = L2 regularization
  double lambda_l2 = 0.0;

  // check = >=0.0
  // desc = linear tree regularization, corresponds to the parameter ``lambda`` in Eq. 3 of `Gradient Boosting with Piece-Wise Linear Regression Trees <https://arxiv.org/pdf/1802.05640.pdf>`__
  double linear_lambda = 0.0;

  // alias = min_split_gain
  // check = >=0.0
  // desc = the minimal gain to perform split
  // desc = can be used to speed up training
  double min_gain_to_split = 0.0;

  // alias = rate_drop
  // check = >=0.0
  // check = <=1.0
  // desc = used only in ``dart``
  // desc = dropout rate: a fraction of previous trees to drop during the dropout
  double drop_rate = 0.1;

  // desc = used only in ``dart``
  // desc = max number of dropped trees during one boosting iteration
  // desc = ``<=0`` means no limit
  int max_drop = 50;

  // check = >=0.0
  // check = <=1.0
  // desc = used only in ``dart``
  // desc = probability of skipping the dropout procedure during a boosting iteration
  double skip_drop = 0.5;

  // desc = used only in ``dart``
  // desc = set this to ``true`` if you want to use xgboost dart mode
  bool xgboost_dart_mode = false;

  // desc = used only in ``dart``
  // desc = set this to ``true`` if you want to use uniform drop
  bool uniform_drop = false;

  // desc = used only in ``dart``
  // desc = random seed to choose dropping models
  int drop_seed = 4;

  // check = >=0.0
  // check = <=1.0
  // desc = used only in ``goss``
  // desc = the ratio of large-gradient data to retain
  double top_rate = 0.2;

  // check = >=0.0
  // check = <=1.0
  // desc = used only in ``goss``
  // desc = the ratio of small-gradient data to retain
  double other_rate = 0.1;

  // check = >0
  // desc = minimal number of data per categorical group
  int min_data_per_group = 100;

  // check = >0
  // desc = used for the categorical features
  // desc = limit number of split points considered for categorical features. See `the documentation on how LightGBM finds optimal splits for categorical features <./Features.rst#optimal-split-for-categorical-features>`_ for more details
  // desc = can be used to speed up training
  int max_cat_threshold = 32;

  // check = >=0.0
  // desc = used for the categorical features
  // desc = L2 regularization in categorical split
  double cat_l2 = 10.0;

  // check = >=0.0
  // desc = used for the categorical features
  // desc = this can reduce the effect of noise in categorical features, especially for categories with little data
  double cat_smooth = 10.0;

  // check = >0
  // desc = when the number of categories of one feature is smaller than or equal to ``max_cat_to_onehot``, the one-vs-other split algorithm will be used
  int max_cat_to_onehot = 4;

  // alias = topk
  // check = >0
  // desc = used only in ``voting`` tree learner, refer to `Voting parallel <./Parallel-Learning-Guide.rst#choose-appropriate-parallel-algorithm>`__
  // desc = set this to a larger value for a more accurate result, but it will slow down the training speed
  int top_k = 20;

  // type = multi-int
  // alias = mc, monotone_constraint, monotonic_cst
  // default = None
  // desc = used for constraints of monotonic features
  // desc = ``1`` means increasing, ``-1`` means decreasing, ``0`` means no constraint
  // desc = you need to specify all features in order. For example, ``mc=-1,0,1`` means decreasing for the 1st feature, no constraint for the 2nd feature and increasing for the 3rd feature
  std::vector<int8_t> monotone_constraints;

  // type = enum
  // alias = monotone_constraining_method, mc_method
  // options = basic, intermediate, advanced
  // desc = used only if ``monotone_constraints`` is set
  // desc = monotone constraints method
  // descl2 = ``basic``, the most basic monotone constraints method. It does not slow the library at all, but over-constrains the predictions
  // descl2 = ``intermediate``, a `more advanced method <https://hal.archives-ouvertes.fr/hal-02862802/document>`__, which may slow the library very slightly. However, this method is much less constraining than the basic method and should significantly improve the results
  // descl2 = ``advanced``, an `even more advanced method <https://hal.archives-ouvertes.fr/hal-02862802/document>`__, which may slow the library. However, this method is even less constraining than the intermediate method and should again significantly improve the results
  std::string monotone_constraints_method = "basic";

  // alias = monotone_splits_penalty, ms_penalty, mc_penalty
  // check = >=0.0
  // desc = used only if ``monotone_constraints`` is set
  // desc = `monotone penalty <https://hal.archives-ouvertes.fr/hal-02862802/document>`__: a penalization parameter X forbids any monotone splits on the first X (rounded down) level(s) of the tree. The penalty applied to monotone splits on a given depth is a continuous, increasing function of the penalization parameter
  // desc = if ``0.0`` (the default), no penalization is applied
  double monotone_penalty = 0.0;

  // type = multi-double
  // alias = feature_contrib, fc, fp, feature_penalty
  // default = None
  // desc = used to control a feature's split gain; ``gain[i] = max(0, feature_contri[i]) * gain[i]`` will replace the split gain of the i-th feature
  // desc = you need to specify all features in order
  std::vector<double> feature_contri;

  // alias = fs, forced_splits_filename, forced_splits_file, forced_splits
  // desc = path to a ``.json`` file that specifies splits to force at the top of every decision tree before best-first learning commences
  // desc = ``.json`` file can be arbitrarily nested, and each split contains ``feature``, ``threshold`` fields, as well as ``left`` and ``right`` fields representing subsplits
  // desc = categorical splits are forced in a one-hot fashion, with ``left`` representing the split containing the feature value and ``right`` representing other values
  // desc = **Note**: the forced split logic will be ignored if the split makes the gain worse
  // desc = see `this file <https://github.com/microsoft/LightGBM/tree/master/examples/binary_classification/forced_splits.json>`__ as an example
  std::string forcedsplits_filename = "";
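
  // An illustrative forced-splits file (hypothetical values), using only the
  // fields described above:
  //
  //   {
  //     "feature": 0,
  //     "threshold": 0.5,
  //     "left": {"feature": 1, "threshold": 2.0}
  //   }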

  // check = >=0.0
  // check = <=1.0
  // desc = decay rate of ``refit`` task, will use ``leaf_output = refit_decay_rate * old_leaf_output + (1.0 - refit_decay_rate) * new_leaf_output`` to refit trees
  // desc = used only in ``refit`` task in CLI version or as argument in ``refit`` function in language-specific package
  double refit_decay_rate = 0.9;

  // check = >=0.0
  // desc = cost-effective gradient boosting multiplier for all penalties
  double cegb_tradeoff = 1.0;

  // check = >=0.0
  // desc = cost-effective gradient boosting penalty for splitting a node
  double cegb_penalty_split = 0.0;

  // type = multi-double
  // default = 0,0,...,0
  // desc = cost-effective gradient boosting penalty for using a feature
  // desc = applied per data point
  std::vector<double> cegb_penalty_feature_lazy;

  // type = multi-double
  // default = 0,0,...,0
  // desc = cost-effective gradient boosting penalty for using a feature
  // desc = applied once per forest
  std::vector<double> cegb_penalty_feature_coupled;

  // check = >= 0.0
  // desc = controls smoothing applied to tree nodes
  // desc = helps prevent overfitting on leaves with few samples
  // desc = if set to zero, no smoothing is applied
  // desc = if ``path_smooth > 0`` then ``min_data_in_leaf`` must be at least ``2``
  // desc = larger values give stronger regularization
  // descl2 = the weight of each node is ``(n / path_smooth) * w / (n / path_smooth + 1) + w_p / (n / path_smooth + 1)``, where ``n`` is the number of samples in the node, ``w`` is the optimal node weight to minimise the loss (approximately ``-sum_gradients / sum_hessians``), and ``w_p`` is the weight of the parent node
  // descl2 = note that the parent output ``w_p`` itself has smoothing applied, unless it is the root node, so that the smoothing effect accumulates with the tree depth
  double path_smooth = 0;
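
  // Quick sanity check of the formula above (illustrative): when
  // ``n == path_smooth``, the node weight is exactly the average
  // ``(w + w_p) / 2``; as ``n`` grows, it approaches the unsmoothed ``w``.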

  // desc = controls which features can appear in the same branch
  // desc = by default interaction constraints are disabled, to enable them you can specify
  // descl2 = for CLI, lists separated by commas, e.g. ``[0,1,2],[2,3]``
  // descl2 = for Python-package, list of lists, e.g. ``[[0, 1, 2], [2, 3]]``
  // descl2 = for R-package, list of character or numeric vectors, e.g. ``list(c("var1", "var2", "var3"), c("var3", "var4"))`` or ``list(c(1L, 2L, 3L), c(3L, 4L))``. Numeric vectors should use 1-based indexing, where ``1L`` is the first feature, ``2L`` is the second feature, etc
  // desc = any two features can appear in the same branch only if there exists a constraint containing both features
  std::string interaction_constraints = "";

  // alias = verbose
  // desc = controls the level of LightGBM's verbosity
  // desc = ``< 0``: Fatal, ``= 0``: Error (Warning), ``= 1``: Info, ``> 1``: Debug
  int verbosity = 1;

  // [no-save]
  // alias = model_input, model_in
  // desc = filename of input model
  // desc = for ``prediction`` task, this model will be applied to prediction data
  // desc = for ``train`` task, training will be continued from this model
  // desc = **Note**: can be used only in CLI version
  std::string input_model = "";

  // [no-save]
  // alias = model_output, model_out
  // desc = filename of output model in training
  // desc = **Note**: can be used only in CLI version
  std::string output_model = "LightGBM_model.txt";

  // desc = the feature importance type in the saved model file
  // desc = ``0``: count-based feature importance (numbers of splits are counted); ``1``: gain-based feature importance (values of gain are counted)
  // desc = **Note**: can be used only in CLI version
  int saved_feature_importance_type = 0;

  // [no-save]
  // alias = save_period
  // desc = frequency of saving model file snapshot
  // desc = set this to a positive value to enable this function. For example, the model file will be snapshotted at each iteration if ``snapshot_freq=1``
  // desc = **Note**: can be used only in CLI version
  int snapshot_freq = -1;

  #pragma endregion

  #pragma region IO Parameters

  #pragma region Dataset Parameters

  // alias = linear_trees
  // desc = fit piecewise linear gradient boosting tree
  // descl2 = tree splits are chosen in the usual way, but the model at each leaf is linear instead of constant
  // descl2 = the linear model at each leaf includes all the numerical features in that leaf's branch
  // descl2 = categorical features are used for splits as normal but are not used in the linear models
  // descl2 = missing values should not be encoded as ``0``. Use ``np.nan`` for Python, ``NA`` for the CLI, and ``NA``, ``NA_real_``, or ``NA_integer_`` for R
  // descl2 = it is recommended to rescale data before training so that features have similar mean and standard deviation
  // descl2 = **Note**: only works with CPU and ``serial`` tree learner
  // descl2 = **Note**: ``regression_l1`` objective is not supported with linear tree boosting
  // descl2 = **Note**: setting ``linear_tree=true`` significantly increases the memory use of LightGBM
  // descl2 = **Note**: if you specify ``monotone_constraints``, constraints will be enforced when choosing the split points, but not when fitting the linear models on leaves
  bool linear_tree = false;

  // alias = max_bins
  // check = >1
  // desc = max number of bins that feature values will be bucketed in
  // desc = a small number of bins may reduce training accuracy but may increase generalization power (helps to deal with over-fitting)
  // desc = LightGBM will auto compress memory according to ``max_bin``. For example, LightGBM will use ``uint8_t`` for feature value if ``max_bin=255``
  int max_bin = 255;

  // type = multi-int
  // default = None
  // desc = max number of bins for each feature
  // desc = if not specified, will use ``max_bin`` for all features
  std::vector<int32_t> max_bin_by_feature;

  // check = >0
  // desc = minimal number of data inside one bin
  // desc = use this to avoid one-data-one-bin (potential over-fitting)
  int min_data_in_bin = 3;

  // alias = subsample_for_bin
  // check = >0
  // desc = number of sampled data points used to construct feature discrete bins
  // desc = setting this to a larger value will give a better training result, but may increase data loading time
  // desc = set this to a larger value if data is very sparse
  // desc = **Note**: don't set this to a small value; otherwise, you may encounter unexpected errors and poor accuracy
  int bin_construct_sample_cnt = 200000;

  // alias = data_seed
  // desc = random seed for sampling data to construct histogram bins
  int data_random_seed = 1;

  // alias = is_sparse, enable_sparse, sparse
  // desc = used to enable/disable sparse optimization
  bool is_enable_sparse = true;

  // alias = is_enable_bundle, bundle
  // desc = set this to ``false`` to disable Exclusive Feature Bundling (EFB), which is described in `LightGBM: A Highly Efficient Gradient Boosting Decision Tree <https://papers.nips.cc/paper/6907-lightgbm-a-highly-efficient-gradient-boosting-decision-tree>`__
  // desc = **Note**: disabling this may cause slow training speed for sparse datasets
  bool enable_bundle = true;

  // desc = set this to ``false`` to disable the special handling of missing values
  bool use_missing = true;

  // desc = set this to ``true`` to treat all zeros as missing values (including the unshown values in LibSVM / sparse matrices)
  // desc = set this to ``false`` to use ``na`` for representing missing values
  bool zero_as_missing = false;

  // desc = set this to ``true`` (the default) to tell LightGBM to ignore the features that are unsplittable based on ``min_data_in_leaf``
  // desc = as the dataset object is initialized only once and cannot be changed after that, you may need to set this to ``false`` when searching parameters with ``min_data_in_leaf``; otherwise features are filtered by the initial ``min_data_in_leaf`` value if you don't reconstruct the dataset object
  // desc = **Note**: setting this to ``false`` may slow down the training
  bool feature_pre_filter = true;

  // alias = is_pre_partition
  // desc = used for distributed learning (excluding the ``feature_parallel`` mode)
  // desc = ``true`` if training data are pre-partitioned, and different machines use different partitions
  bool pre_partition = false;

  // alias = two_round_loading, use_two_round_loading
  // desc = set this to ``true`` if the data file is too big to fit in memory
  // desc = by default, LightGBM will map the data file to memory and load features from memory. This will provide faster data loading, but may cause an out-of-memory error when the data file is very big
  // desc = **Note**: works only in case of loading data directly from text file
  bool two_round = false;

  // alias = has_header
  // desc = set this to ``true`` if input data has a header
  // desc = **Note**: works only in case of loading data directly from text file
  bool header = false;

  // type = int or string
  // alias = label
  // desc = used to specify the label column
  // desc = use number for index, e.g. ``label=0`` means column\_0 is the label
  // desc = add a prefix ``name:`` for column name, e.g. ``label=name:is_click``
  // desc = if omitted, the first column in the training data is used as the label
  // desc = **Note**: works only in case of loading data directly from text file
  std::string label_column = "";

  // type = int or string
  // alias = weight
  // desc = used to specify the weight column
  // desc = use number for index, e.g. ``weight=0`` means column\_0 is the weight
  // desc = add a prefix ``name:`` for column name, e.g. ``weight=name:weight``
  // desc = **Note**: works only in case of loading data directly from text file
  // desc = **Note**: index starts from ``0`` and it doesn't count the label column when passing type is ``int``, e.g. when label is column\_0, and weight is column\_1, the correct parameter is ``weight=0``
  std::string weight_column = "";

  // type = int or string
  // alias = group, group_id, query_column, query, query_id
  // desc = used to specify the query/group id column
  // desc = use number for index, e.g. ``query=0`` means column\_0 is the query id
  // desc = add a prefix ``name:`` for column name, e.g. ``query=name:query_id``
  // desc = **Note**: works only in case of loading data directly from text file
  // desc = **Note**: data should be grouped by query\_id, for more information, see `Query Data <#query-data>`__
  // desc = **Note**: index starts from ``0`` and it doesn't count the label column when passing type is ``int``, e.g. when label is column\_0 and query\_id is column\_1, the correct parameter is ``query=0``
  std::string group_column = "";

  // type = multi-int or string
  // alias = ignore_feature, blacklist
  // desc = used to specify columns to ignore in training
  // desc = use number for index, e.g. ``ignore_column=0,1,2`` means column\_0, column\_1 and column\_2 will be ignored
  // desc = add a prefix ``name:`` for column name, e.g. ``ignore_column=name:c1,c2,c3`` means c1, c2 and c3 will be ignored
  // desc = **Note**: works only in case of loading data directly from text file
  // desc = **Note**: index starts from ``0`` and it doesn't count the label column when passing type is ``int``
  // desc = **Note**: despite the fact that specified columns will be completely ignored during training, they should still have a valid format allowing LightGBM to load the file successfully
  std::string ignore_column = "";

  // type = multi-int or string
  // alias = cat_feature, categorical_column, cat_column, categorical_features
  // desc = used to specify categorical features
  // desc = use number for index, e.g. ``categorical_feature=0,1,2`` means column\_0, column\_1 and column\_2 are categorical features
  // desc = add a prefix ``name:`` for column name, e.g. ``categorical_feature=name:c1,c2,c3`` means c1, c2 and c3 are categorical features
  // desc = **Note**: only supports categorical features with ``int`` type (not applicable for data represented as a pandas DataFrame in the Python-package)
  // desc = **Note**: index starts from ``0`` and it doesn't count the label column when passing type is ``int``
  // desc = **Note**: all values should be less than ``Int32.MaxValue`` (2147483647)
  // desc = **Note**: using large values could be memory consuming. Tree decision rules work best when categorical features are represented by consecutive integers starting from zero
  // desc = **Note**: all negative values will be treated as **missing values**
  // desc = **Note**: the output cannot be monotonically constrained with respect to a categorical feature
  std::string categorical_feature = "";

  // desc = path to a ``.json`` file that specifies bin upper bounds for some or all features
  // desc = ``.json`` file should contain an array of objects, each containing the word ``feature`` (integer feature index) and ``bin_upper_bound`` (array of thresholds for binning)
  // desc = see `this file <https://github.com/microsoft/LightGBM/tree/master/examples/regression/forced_bins.json>`__ as an example
  std::string forcedbins_filename = "";
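
  // An illustrative forced-bins file (hypothetical values), matching the
  // structure described above:
  //
  //   [
  //     {"feature": 0, "bin_upper_bound": [0.1, 0.5, 0.9]},
  //     {"feature": 2, "bin_upper_bound": [10.0, 20.0]}
  //   ]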

  // [no-save]
  // alias = is_save_binary, is_save_binary_file
  // desc = if ``true``, LightGBM will save the dataset (including validation data) to a binary file. This speeds up data loading the next time
  // desc = **Note**: ``init_score`` is not saved in binary file
  // desc = **Note**: can be used only in CLI version; for language-specific packages you can use the correspondent function
  bool save_binary = false;

  // desc = use precise floating point number parsing for text parser (e.g. CSV, TSV, LibSVM input)
  // desc = **Note**: setting this to ``true`` may lead to much slower text parsing
  bool precise_float_parser = false;

  #pragma endregion

  #pragma region Predict Parameters

  // [no-save]
  // desc = used only in ``prediction`` task
  // desc = used to specify from which iteration to start the prediction
  // desc = ``<= 0`` means from the first iteration
  int start_iteration_predict = 0;

  // [no-save]
  // desc = used only in ``prediction`` task
  // desc = used to specify how many trained iterations will be used in prediction
  // desc = ``<= 0`` means no limit
  int num_iteration_predict = -1;

  // [no-save]
  // alias = is_predict_raw_score, predict_rawscore, raw_score
  // desc = used only in ``prediction`` task
  // desc = set this to ``true`` to predict only the raw scores
  // desc = set this to ``false`` to predict transformed scores
  bool predict_raw_score = false;

  // [no-save]
  // alias = is_predict_leaf_index, leaf_index
  // desc = used only in ``prediction`` task
  // desc = set this to ``true`` to predict with leaf index of all trees
  bool predict_leaf_index = false;

  // [no-save]
  // alias = is_predict_contrib, contrib
  // desc = used only in ``prediction`` task
  // desc = set this to ``true`` to estimate `SHAP values <https://arxiv.org/abs/1706.06060>`__, which represent how each feature contributes to each prediction
  // desc = produces ``#features + 1`` values where the last value is the expected value of the model output over the training data
  // desc = **Note**: if you want to get more explanation for your model's predictions using SHAP values like SHAP interaction values, you can install `shap package <https://github.com/slundberg/shap>`__
  // desc = **Note**: unlike the shap package, with ``predict_contrib`` we return a matrix with an extra column, where the last column is the expected value
  // desc = **Note**: this feature is not implemented for linear trees
  bool predict_contrib = false;
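
  // Shape example for the notes above: with 10 input features, each
  // prediction row contains 10 + 1 = 11 values, the last one being the
  // expected value of the model output over the training data.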

  // [no-save]
  // desc = used only in ``prediction`` task
  // desc = control whether or not LightGBM raises an error when you try to predict on data with a different number of features than the training data
  // desc = if ``false`` (the default), a fatal error will be raised if the number of features in the dataset you predict on differs from the number seen during training
  // desc = if ``true``, LightGBM will attempt to predict on whatever data you provide. This is dangerous because you might get incorrect predictions, but you could use it in situations where it is difficult or expensive to generate some features and you are very confident that they were never chosen for splits in the model
  // desc = **Note**: be very careful setting this parameter to ``true``
  bool predict_disable_shape_check = false;

  // [no-save]
  // desc = used only in ``prediction`` task
  // desc = used only in ``classification`` and ``ranking`` applications
  // desc = if ``true``, will use early-stopping to speed up the prediction. May affect the accuracy
  // desc = **Note**: cannot be used with ``rf`` boosting type or custom objective function
  bool pred_early_stop = false;

  // [no-save]
  // desc = used only in ``prediction`` task
  // desc = the frequency of checking early-stopping prediction
  int pred_early_stop_freq = 10;

  // [no-save]
  // desc = used only in ``prediction`` task
  // desc = the threshold of margin in early-stopping prediction
  double pred_early_stop_margin = 10.0;

  // [no-save]
  // alias = predict_result, prediction_result, predict_name, prediction_name, pred_name, name_pred
  // desc = used only in ``prediction`` task
  // desc = filename of prediction result
  // desc = **Note**: can be used only in CLI version
  std::string output_result = "LightGBM_predict_result.txt";

  #pragma endregion

  #pragma region Convert Parameters

  // [no-save]
  // desc = used only in ``convert_model`` task
  // desc = only ``cpp`` is supported so far; to convert the model to other languages, consider using the `m2cgen <https://github.com/BayesWitnesses/m2cgen>`__ utility
  // desc = if ``convert_model_language`` is set and ``task=train``, the model will also be converted
  // desc = **Note**: can be used only in CLI version
  std::string convert_model_language = "";

  // [no-save]
  // alias = convert_model_file
  // desc = used only in ``convert_model`` task
  // desc = output filename of converted model
  // desc = **Note**: can be used only in CLI version
  std::string convert_model = "gbdt_prediction.cpp";

  #pragma endregion

  #pragma endregion

  #pragma region Objective Parameters

  // desc = used only in ``rank_xendcg`` objective
  // desc = random seed for objectives, if random process is needed
  int objective_seed = 5;

  // check = >0
  // alias = num_classes
  // desc = used only in ``multi-class`` classification application
  int num_class = 1;

  // alias = unbalance, unbalanced_sets
  // desc = used only in ``binary`` and ``multiclassova`` applications
  // desc = set this to ``true`` if training data are unbalanced
  // desc = **Note**: while enabling this should increase the overall performance metric of your model, it will also result in poor estimates of the individual class probabilities
  // desc = **Note**: this parameter cannot be used at the same time with ``scale_pos_weight``, choose only **one** of them
  bool is_unbalance = false;

  // check = >0.0
  // desc = used only in ``binary`` and ``multiclassova`` applications
  // desc = weight of labels with positive class
  // desc = **Note**: while enabling this should increase the overall performance metric of your model, it will also result in poor estimates of the individual class probabilities
  // desc = **Note**: this parameter cannot be used at the same time with ``is_unbalance``, choose only **one** of them
  double scale_pos_weight = 1.0;

  // check = >0.0
  // desc = used only in ``binary`` and ``multiclassova`` classification and in ``lambdarank`` applications
  // desc = parameter for the sigmoid function
  double sigmoid = 1.0;

  // desc = used only in ``regression``, ``binary``, ``multiclassova`` and ``cross-entropy`` applications
  // desc = adjusts initial score to the mean of labels for faster convergence
  bool boost_from_average = true;

  // desc = used only in ``regression`` application
  // desc = used to fit ``sqrt(label)`` instead of original values; the prediction result will also be automatically converted to ``prediction^2``
  // desc = might be useful in case of large-range labels
  bool reg_sqrt = false;

  // check = >0.0
  // desc = used only in ``huber`` and ``quantile`` ``regression`` applications
  // desc = parameter for `Huber loss <https://en.wikipedia.org/wiki/Huber_loss>`__ and `Quantile regression <https://en.wikipedia.org/wiki/Quantile_regression>`__
  double alpha = 0.9;

  // check = >0.0
  // desc = used only in ``fair`` ``regression`` application
  // desc = parameter for `Fair loss <https://www.kaggle.com/c/allstate-claims-severity/discussion/24520>`__
  double fair_c = 1.0;

  // check = >0.0
  // desc = used only in ``poisson`` ``regression`` application
  // desc = parameter for `Poisson regression <https://en.wikipedia.org/wiki/Poisson_regression>`__ to safeguard optimization
  double poisson_max_delta_step = 0.7;

  // check = >=1.0
  // check = <2.0
  // desc = used only in ``tweedie`` ``regression`` application
  // desc = used to control the variance of the tweedie distribution
  // desc = set this closer to ``2`` to shift towards a **Gamma** distribution
  // desc = set this closer to ``1`` to shift towards a **Poisson** distribution
  double tweedie_variance_power = 1.5;
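
  // Background note (a standard Tweedie fact, not specific to LightGBM): the
  // variance power ``p`` ties the variance to the mean via
  // ``Var(y) ~ mean^p``, so ``p -> 1`` behaves Poisson-like and ``p -> 2``
  // Gamma-like.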

  // check = >0
  // desc = used only in ``lambdarank`` application
Nikita Titov's avatar
Nikita Titov committed
880
881
  // desc = controls the number of top-results to focus on during training, refer to "truncation level" in the Sec. 3 of `LambdaMART paper <https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/MSR-TR-2010-82.pdf>`__
  // desc = this parameter is closely related to the desirable cutoff ``k`` in the metric **NDCG@k** that we aim at optimizing the ranker for. The optimal setting for this parameter is likely to be slightly higher than ``k`` (e.g., ``k + 3``) to include more pairs of documents to train on, but perhaps not too high to avoid deviating too much from the desired target metric **NDCG@k**
882
  int lambdarank_truncation_level = 30;

  // desc = used only in ``lambdarank`` application
  // desc = set this to ``true`` to normalize the lambdas for different queries, and improve the performance for unbalanced data
  // desc = set this to ``false`` to enforce the original lambdarank algorithm
  bool lambdarank_norm = true;

  // type = multi-double
  // default = 0,1,3,7,15,31,63,...,2^30-1
  // desc = used only in ``lambdarank`` application
  // desc = relevant gain for labels. For example, the gain of label ``2`` is ``3`` in case of default label gains
  // desc = separate by ``,``
  std::vector<double> label_gain;
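  // A minimal sketch of how the defaults above could be generated, assuming
  // the ``2^i - 1`` scheme shown in ``default``:
  //
  //   std::vector<double> gains;
  //   for (int i = 0; i < 31; ++i) {
  //     gains.push_back(std::pow(2.0, i) - 1.0);  // 0, 1, 3, 7, 15, ...
  //   }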

  #pragma endregion

  #pragma region Metric Parameters

  // [doc-only]
  // alias = metrics, metric_types
  // default = ""
  // type = multi-enum
  // desc = metric(s) to be evaluated on the evaluation set(s)
  // descl2 = ``""`` (empty string or not specified) means that metric corresponding to specified ``objective`` will be used (this is possible only for pre-defined objective functions, otherwise no evaluation metric will be added)
  // descl2 = ``"None"`` (string, **not** a ``None`` value) means that no metric will be registered, aliases: ``na``, ``null``, ``custom``
  // descl2 = ``l1``, absolute loss, aliases: ``mean_absolute_error``, ``mae``, ``regression_l1``
  // descl2 = ``l2``, square loss, aliases: ``mean_squared_error``, ``mse``, ``regression_l2``, ``regression``
  // descl2 = ``rmse``, root square loss, aliases: ``root_mean_squared_error``, ``l2_root``
  // descl2 = ``quantile``, `Quantile regression <https://en.wikipedia.org/wiki/Quantile_regression>`__
  // descl2 = ``mape``, `MAPE loss <https://en.wikipedia.org/wiki/Mean_absolute_percentage_error>`__, aliases: ``mean_absolute_percentage_error``
  // descl2 = ``huber``, `Huber loss <https://en.wikipedia.org/wiki/Huber_loss>`__
  // descl2 = ``fair``, `Fair loss <https://www.kaggle.com/c/allstate-claims-severity/discussion/24520>`__
  // descl2 = ``poisson``, negative log-likelihood for `Poisson regression <https://en.wikipedia.org/wiki/Poisson_regression>`__
  // descl2 = ``gamma``, negative log-likelihood for **Gamma** regression
  // descl2 = ``gamma_deviance``, residual deviance for **Gamma** regression
  // descl2 = ``tweedie``, negative log-likelihood for **Tweedie** regression
  // descl2 = ``ndcg``, `NDCG <https://en.wikipedia.org/wiki/Discounted_cumulative_gain#Normalized_DCG>`__, aliases: ``lambdarank``, ``rank_xendcg``, ``xendcg``, ``xe_ndcg``, ``xe_ndcg_mart``, ``xendcg_mart``
  // descl2 = ``map``, `MAP <https://makarandtapaswi.wordpress.com/2012/07/02/intuition-behind-average-precision-and-map/>`__, aliases: ``mean_average_precision``
  // descl2 = ``auc``, `AUC <https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve>`__
  // descl2 = ``average_precision``, `average precision score <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.average_precision_score.html>`__
  // descl2 = ``binary_logloss``, `log loss <https://en.wikipedia.org/wiki/Cross_entropy>`__, aliases: ``binary``
  // descl2 = ``binary_error``, for one sample: ``0`` for correct classification, ``1`` for incorrect classification
  // descl2 = ``auc_mu``, `AUC-mu <http://proceedings.mlr.press/v97/kleiman19a/kleiman19a.pdf>`__
  // descl2 = ``multi_logloss``, log loss for multi-class classification, aliases: ``multiclass``, ``softmax``, ``multiclassova``, ``multiclass_ova``, ``ova``, ``ovr``
  // descl2 = ``multi_error``, error rate for multi-class classification
  // descl2 = ``cross_entropy``, cross-entropy (with optional linear weights), aliases: ``xentropy``
  // descl2 = ``cross_entropy_lambda``, "intensity-weighted" cross-entropy, aliases: ``xentlambda``
  // descl2 = ``kullback_leibler``, `Kullback-Leibler divergence <https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence>`__, aliases: ``kldiv``
  // desc = support multiple metrics, separated by ``,``
  std::vector<std::string> metric;
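  // Illustrative CLI-style configuration (hypothetical values), evaluating a
  // binary model with both log loss and AUC:
  //
  //   metric = binary_logloss,auc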

  // [no-save]
  // check = >0
  // alias = output_freq
  // desc = frequency for metric output
  // desc = **Note**: can be used only in CLI version
  int metric_freq = 1;

  // [no-save]
  // alias = training_metric, is_training_metric, train_metric
  // desc = set this to ``true`` to output metric result over training dataset
  // desc = **Note**: can be used only in CLI version
  bool is_provide_training_metric = false;

  // type = multi-int
  // default = 1,2,3,4,5
  // alias = ndcg_eval_at, ndcg_at, map_eval_at, map_at
  // desc = used only with ``ndcg`` and ``map`` metrics
  // desc = `NDCG <https://en.wikipedia.org/wiki/Discounted_cumulative_gain#Normalized_DCG>`__ and `MAP <https://makarandtapaswi.wordpress.com/2012/07/02/intuition-behind-average-precision-and-map/>`__ evaluation positions, separated by ``,``
  std::vector<int> eval_at;

  // check = >0
  // desc = used only with ``multi_error`` metric
  // desc = threshold for top-k multi-error metric
  // desc = the error on each sample is ``0`` if the true class is among the top ``multi_error_top_k`` predictions, and ``1`` otherwise
  // descl2 = more precisely, the error on a sample is ``0`` if there are at least ``num_classes - multi_error_top_k`` predictions strictly less than the prediction on the true class
  // desc = when ``multi_error_top_k=1`` this is equivalent to the usual multi-error metric
  int multi_error_top_k = 1;
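  // A minimal sketch of the rule above for a single sample, assuming ``probs``
  // holds the per-class predictions and ``true_class`` the label:
  //
  //   int num_strictly_less = 0;
  //   for (size_t c = 0; c < probs.size(); ++c) {
  //     if (probs[c] < probs[true_class]) ++num_strictly_less;
  //   }
  //   int sample_error =
  //       (num_strictly_less >= static_cast<int>(probs.size()) - multi_error_top_k) ? 0 : 1;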

  // type = multi-double
  // default = None
  // desc = used only with ``auc_mu`` metric
  // desc = list representing flattened matrix (in row-major order) giving loss weights for classification errors
  // desc = list should have ``n * n`` elements, where ``n`` is the number of classes
  // desc = the matrix co-ordinate ``[i, j]`` should correspond to the ``i * n + j``-th element of the list
  // desc = if not specified, will use equal weights for all classes
  std::vector<double> auc_mu_weights;
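  // A minimal sketch of the row-major flattening described above, assuming
  // ``n = 3`` classes; matrix entry ``[i, j]`` lands at flat index ``i * n + j``:
  //
  //   const int n = 3;
  //   std::vector<double> flat(n * n);
  //   for (int i = 0; i < n; ++i) {
  //     for (int j = 0; j < n; ++j) {
  //       flat[i * n + j] = (i == j) ? 0.0 : 1.0;  // equal weights off the diagonal
  //     }
  //   }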

  #pragma endregion

  #pragma region Network Parameters

  // check = >0
  // alias = num_machine
  // desc = the number of machines for distributed learning application
  // desc = this parameter needs to be set in both **socket** and **mpi** versions
  int num_machines = 1;

  // check = >0
  // default = 12400 (random for Dask-package)
  // alias = local_port, port
  // desc = TCP listen port for local machines
  // desc = **Note**: don't forget to allow this port in firewall settings before training
  int local_listen_port = 12400;

  // check = >0
  // desc = socket time-out in minutes
  int time_out = 120;

  // alias = machine_list_file, machine_list, mlist
  // desc = path of file that lists machines for this distributed learning application
  // desc = each line contains one IP and one port for one machine. The format is ``ip port`` (space as a separator)
  // desc = **Note**: can be used only in CLI version
  std::string machine_list_filename = "";

  // alias = workers, nodes
  // desc = list of machines in the following format: ``ip1:port1,ip2:port2``
  std::string machines = "";

  #pragma endregion

  #pragma region GPU Parameters

  // desc = OpenCL platform ID. Usually each GPU vendor exposes one OpenCL platform
  // desc = ``-1`` means the system-wide default platform
  // desc = **Note**: refer to `GPU Targets <./GPU-Targets.rst#query-opencl-devices-in-your-system>`__ for more details
  int gpu_platform_id = -1;

  // desc = OpenCL device ID in the specified platform. Each GPU in the selected platform has a unique device ID
  // desc = ``-1`` means the default device in the selected platform
  // desc = **Note**: refer to `GPU Targets <./GPU-Targets.rst#query-opencl-devices-in-your-system>`__ for more details
  int gpu_device_id = -1;

  // desc = set this to ``true`` to use double precision math on GPU (by default single precision is used)
  // desc = **Note**: can be used only in OpenCL implementation, in CUDA implementation only double precision is currently supported
  bool gpu_use_dp = false;

  // check = >0
  // desc = number of GPUs
  // desc = **Note**: can be used only in CUDA implementation
  int num_gpu = 1;

  #pragma endregion

  #pragma endregion

  // interval (in bytes) at which progress is logged while loading data from text files
  size_t file_load_progress_interval_bytes = size_t(10) * 1024 * 1024 * 1024;

  // true if any distributed (parallel) learning mode is in use
  bool is_parallel = false;
  // true if the distributed learning mode is data-parallel or voting-parallel
  bool is_data_based_parallel = false;
  LIGHTGBM_EXPORT void Set(const std::unordered_map<std::string, std::string>& params);
  static const std::unordered_map<std::string, std::string>& alias_table();
  static const std::unordered_set<std::string>& parameter_set();
  std::vector<std::vector<double>> auc_mu_weights_matrix;
  std::vector<std::vector<int>> interaction_constraints_vector;

 private:
  void CheckParamConflict();
  void GetMembersFromString(const std::unordered_map<std::string, std::string>& params);
  std::string SaveMembersToString() const;
  void GetAucMuWeights();
  void GetInteractionConstraints();
};

inline bool Config::GetString(
  const std::unordered_map<std::string, std::string>& params,
  const std::string& name, std::string* out) {
  if (params.count(name) > 0 && !params.at(name).empty()) {
    *out = params.at(name);
    return true;
  }
  return false;
}

inline bool Config::GetInt(
  const std::unordered_map<std::string, std::string>& params,
  const std::string& name, int* out) {
  if (params.count(name) > 0 && !params.at(name).empty()) {
    if (!Common::AtoiAndCheck(params.at(name).c_str(), out)) {
      Log::Fatal("Parameter %s should be of type int, got \"%s\"",
                 name.c_str(), params.at(name).c_str());
    }
    return true;
  }
  return false;
}

inline bool Config::GetDouble(
  const std::unordered_map<std::string, std::string>& params,
  const std::string& name, double* out) {
  if (params.count(name) > 0 && !params.at(name).empty()) {
    if (!Common::AtofAndCheck(params.at(name).c_str(), out)) {
      Log::Fatal("Parameter %s should be of type double, got \"%s\"",
                 name.c_str(), params.at(name).c_str());
    }
    return true;
  }
  return false;
}

inline bool Config::GetBool(
  const std::unordered_map<std::string, std::string>& params,
  const std::string& name, bool* out) {
  if (params.count(name) > 0 && !params.at(name).empty()) {
    std::string value = params.at(name);
    std::transform(value.begin(), value.end(), value.begin(), Common::tolower);
    if (value == std::string("false") || value == std::string("-")) {
      *out = false;
    } else if (value == std::string("true") || value == std::string("+")) {
      *out = true;
    } else {
      Log::Fatal("Parameter %s should be \"true\"/\"+\" or \"false\"/\"-\", got \"%s\"",
                 name.c_str(), params.at(name).c_str());
    }
    return true;
  }
  return false;
}
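
// Illustrative use of the typed getters above (hypothetical parameter values):
//
//   std::unordered_map<std::string, std::string> params = {
//       {"num_class", "3"}, {"is_unbalance", "true"}};
//   int num_class = 1;
//   bool is_unbalance = false;
//   Config::GetInt(params, "num_class", &num_class);         // num_class == 3
//   Config::GetBool(params, "is_unbalance", &is_unbalance);  // is_unbalance == true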

struct ParameterAlias {
  static void KeyAliasTransform(std::unordered_map<std::string, std::string>* params) {
    std::unordered_map<std::string, std::string> tmp_map;
    for (const auto& pair : *params) {
      auto alias = Config::alias_table().find(pair.first);
      if (alias != Config::alias_table().end()) {  // found alias
        auto alias_set = tmp_map.find(alias->second);
        if (alias_set != tmp_map.end()) {  // alias already set
                                           // set priority by length & alphabetically to ensure reproducible behavior
          if (alias_set->second.size() < pair.first.size() ||
            (alias_set->second.size() == pair.first.size() && alias_set->second < pair.first)) {
            Log::Warning("%s is set with %s=%s, %s=%s will be ignored. Current value: %s=%s",
                         alias->second.c_str(), alias_set->second.c_str(), params->at(alias_set->second).c_str(),
                         pair.first.c_str(), pair.second.c_str(), alias->second.c_str(), params->at(alias_set->second).c_str());
          } else {
            Log::Warning("%s is set with %s=%s, will be overridden by %s=%s. Current value: %s=%s",
                         alias->second.c_str(), alias_set->second.c_str(), params->at(alias_set->second).c_str(),
                         pair.first.c_str(), pair.second.c_str(), alias->second.c_str(), pair.second.c_str());
            tmp_map[alias->second] = pair.first;
          }
        } else {  // alias not set
          tmp_map.emplace(alias->second, pair.first);
        }
      } else if (Config::parameter_set().find(pair.first) == Config::parameter_set().end()) {
        Log::Warning("Unknown parameter: %s", pair.first.c_str());
      }
    }
    for (const auto& pair : tmp_map) {
      auto alias = params->find(pair.first);
      if (alias == params->end()) {  // not found
        params->emplace(pair.first, params->at(pair.second));
        params->erase(pair.second);
      } else {
        Log::Warning("%s is set=%s, %s=%s will be ignored. Current value: %s=%s",
                     pair.first.c_str(), alias->second.c_str(), pair.second.c_str(), params->at(pair.second).c_str(),
                     pair.first.c_str(), alias->second.c_str());
      }
    }
  }
};
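
// Illustrative effect of KeyAliasTransform (assuming "num_iteration" appears
// in Config::alias_table() as an alias of "num_iterations"):
//
//   std::unordered_map<std::string, std::string> params = {{"num_iteration", "100"}};
//   ParameterAlias::KeyAliasTransform(&params);
//   // params now contains {"num_iterations", "100"}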

inline std::string ParseObjectiveAlias(const std::string& type) {
  if (type == std::string("regression") || type == std::string("regression_l2")
    || type == std::string("mean_squared_error") || type == std::string("mse") || type == std::string("l2")
    || type == std::string("l2_root") || type == std::string("root_mean_squared_error") || type == std::string("rmse")) {
    return "regression";
  } else if (type == std::string("regression_l1") || type == std::string("mean_absolute_error")
    || type == std::string("l1") || type == std::string("mae")) {
    return "regression_l1";
  } else if (type == std::string("multiclass") || type == std::string("softmax")) {
    return "multiclass";
  } else if (type == std::string("multiclassova") || type == std::string("multiclass_ova") || type == std::string("ova") || type == std::string("ovr")) {
    return "multiclassova";
  } else if (type == std::string("xentropy") || type == std::string("cross_entropy")) {
    return "cross_entropy";
  } else if (type == std::string("xentlambda") || type == std::string("cross_entropy_lambda")) {
    return "cross_entropy_lambda";
  } else if (type == std::string("mean_absolute_percentage_error") || type == std::string("mape")) {
    return "mape";
  } else if (type == std::string("rank_xendcg") || type == std::string("xendcg") || type == std::string("xe_ndcg")
             || type == std::string("xe_ndcg_mart") || type == std::string("xendcg_mart")) {
    return "rank_xendcg";
  } else if (type == std::string("none") || type == std::string("null") || type == std::string("custom") || type == std::string("na")) {
    return "custom";
  }
  return type;
}

inline std::string ParseMetricAlias(const std::string& type) {
  if (type == std::string("regression") || type == std::string("regression_l2") || type == std::string("l2") || type == std::string("mean_squared_error") || type == std::string("mse")) {
    return "l2";
  } else if (type == std::string("l2_root") || type == std::string("root_mean_squared_error") || type == std::string("rmse")) {
    return "rmse";
  } else if (type == std::string("regression_l1") || type == std::string("l1") || type == std::string("mean_absolute_error") || type == std::string("mae")) {
    return "l1";
  } else if (type == std::string("binary_logloss") || type == std::string("binary")) {
    return "binary_logloss";
  } else if (type == std::string("ndcg") || type == std::string("lambdarank") || type == std::string("rank_xendcg")
             || type == std::string("xendcg") || type == std::string("xe_ndcg") || type == std::string("xe_ndcg_mart") || type == std::string("xendcg_mart")) {
    return "ndcg";
  } else if (type == std::string("map") || type == std::string("mean_average_precision")) {
    return "map";
  } else if (type == std::string("multi_logloss") || type == std::string("multiclass") || type == std::string("softmax") || type == std::string("multiclassova") || type == std::string("multiclass_ova") || type == std::string("ova") || type == std::string("ovr")) {
    return "multi_logloss";
  } else if (type == std::string("xentropy") || type == std::string("cross_entropy")) {
    return "cross_entropy";
  } else if (type == std::string("xentlambda") || type == std::string("cross_entropy_lambda")) {
    return "cross_entropy_lambda";
  } else if (type == std::string("kldiv") || type == std::string("kullback_leibler")) {
    return "kullback_leibler";
  } else if (type == std::string("mean_absolute_percentage_error") || type == std::string("mape")) {
    return "mape";
  } else if (type == std::string("none") || type == std::string("null") || type == std::string("custom") || type == std::string("na")) {
    return "custom";
  }
  return type;
}
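
// For example, ParseObjectiveAlias("mse") returns "regression" while
// ParseMetricAlias("mse") returns "l2": the same user-supplied alias is
// normalized differently for objectives and for metrics.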

}   // namespace LightGBM

#endif   // LIGHTGBM_CONFIG_H_