/*!
 * Copyright (c) 2016 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for license information.
 *
 * \note
 * - desc and descl2 fields must be written in reStructuredText format;
 * - nested sections can be placed only at the bottom of parent's section;
 * - [doc-only] tag indicates that only documentation for this param should be generated and all other actions are performed manually;
 * - [no-save] tag indicates that this param should not be saved into a model text representation.
 */
#ifndef LIGHTGBM_CONFIG_H_
#define LIGHTGBM_CONFIG_H_

#include <LightGBM/export.h>
#include <LightGBM/meta.h>
#include <LightGBM/utils/common.h>
#include <LightGBM/utils/log.h>

#include <string>
#include <algorithm>
#include <memory>
#include <unordered_map>
#include <unordered_set>
#include <vector>

namespace LightGBM {

/*! \brief Types of tasks */
enum TaskType {
  kTrain, kPredict, kConvertModel, KRefitTree, kSaveBinary
};
const int kDefaultNumLeaves = 31;

struct Config {
 public:
  std::string ToString() const;
  /*!
  * \brief Get the string value for a specific key
  * \param params Map storing the parameter keys and values
  * \param name Name of the key
  * \param out The value will be assigned to out if the key exists
  * \return True if the key exists
  */
  inline static bool GetString(
    const std::unordered_map<std::string, std::string>& params,
    const std::string& name, std::string* out);

  /*!
  * \brief Get the int value for a specific key
  * \param params Map storing the parameter keys and values
  * \param name Name of the key
  * \param out The value will be assigned to out if the key exists
  * \return True if the key exists
  */
  inline static bool GetInt(
    const std::unordered_map<std::string, std::string>& params,
    const std::string& name, int* out);

  /*!
  * \brief Get the double value for a specific key
  * \param params Map storing the parameter keys and values
  * \param name Name of the key
  * \param out The value will be assigned to out if the key exists
  * \return True if the key exists
  */
  inline static bool GetDouble(
    const std::unordered_map<std::string, std::string>& params,
    const std::string& name, double* out);

  /*!
  * \brief Get the bool value for a specific key
  * \param params Map storing the parameter keys and values
  * \param name Name of the key
  * \param out The value will be assigned to out if the key exists
  * \return True if the key exists
  */
  inline static bool GetBool(
    const std::unordered_map<std::string, std::string>& params,
    const std::string& name, bool* out);
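
  // Illustrative usage of the getters above (a sketch, not part of the header):
  // each Get* call writes the parsed value into *out and reports whether the
  // key was present, leaving *out untouched otherwise.
  //
  //   std::unordered_map<std::string, std::string> params = {
  //       {"num_leaves", "63"}, {"learning_rate", "0.05"}};
  //   int num_leaves = kDefaultNumLeaves;
  //   if (Config::GetInt(params, "num_leaves", &num_leaves)) {
  //     // num_leaves == 63; otherwise the default is kept
  //   }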

  /*!
  * \brief Comparator to sort aliases by length and then alphabetically
  * \param x Alias 1
  * \param y Alias 2
  * \return true if x has higher priority than y
  */
  inline static bool SortAlias(const std::string& x, const std::string& y);
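
  // For example (illustrative only): sorting a parameter's aliases with this
  // comparator puts shorter names first and breaks length ties alphabetically.
  //
  //   std::vector<std::string> aliases = {"num_threads", "nthread", "n_jobs"};
  //   std::sort(aliases.begin(), aliases.end(), Config::SortAlias);
  //   // aliases is now {"n_jobs", "nthread", "num_threads"}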

  static void KV2Map(std::unordered_map<std::string, std::string>* params, const char* kv);
  static std::unordered_map<std::string, std::string> Str2Map(const char* parameters);
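
  // A minimal sketch of how these parsers fit together, assuming
  // whitespace-separated ``key=value`` pairs as in the CLI: KV2Map parses a
  // single pair into an existing map, Str2Map parses a whole parameter string.
  //
  //   auto params = Config::Str2Map("task=train num_leaves=63");
  //   // params["task"] == "train", params["num_leaves"] == "63"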

  #ifndef __NVCC__
  #pragma region Parameters

  #pragma region Core Parameters
  #endif  // __NVCC__

  // [no-save]
  // [doc-only]
  // alias = config_file
  // desc = path of config file
  // desc = **Note**: can be used only in CLI version
  std::string config = "";

  // [no-save]
  // [doc-only]
  // type = enum
  // default = train
  // options = train, predict, convert_model, refit
  // alias = task_type
  // desc = ``train``, for training, aliases: ``training``
  // desc = ``predict``, for prediction, aliases: ``prediction``, ``test``
  // desc = ``convert_model``, for converting model file into if-else format, see more information in `Convert Parameters <#convert-parameters>`__
  // desc = ``refit``, for refitting existing models with new data, aliases: ``refit_tree``
  // desc = ``save_binary``, load train (and validation) data then save dataset to binary file. Typical usage: ``save_binary`` first, then run multiple ``train`` tasks in parallel using the saved binary file
  // desc = **Note**: can be used only in CLI version; for language-specific packages you can use the correspondent functions
  TaskType task = TaskType::kTrain;

  // [doc-only]
  // type = enum
  // options = regression, regression_l1, huber, fair, poisson, quantile, mape, gamma, tweedie, binary, multiclass, multiclassova, cross_entropy, cross_entropy_lambda, lambdarank, rank_xendcg
  // alias = objective_type, app, application, loss
  // desc = regression application
  // descl2 = ``regression``, L2 loss, aliases: ``regression_l2``, ``l2``, ``mean_squared_error``, ``mse``, ``l2_root``, ``root_mean_squared_error``, ``rmse``
  // descl2 = ``regression_l1``, L1 loss, aliases: ``l1``, ``mean_absolute_error``, ``mae``
  // descl2 = ``huber``, `Huber loss <https://en.wikipedia.org/wiki/Huber_loss>`__
  // descl2 = ``fair``, `Fair loss <https://www.kaggle.com/c/allstate-claims-severity/discussion/24520>`__
  // descl2 = ``poisson``, `Poisson regression <https://en.wikipedia.org/wiki/Poisson_regression>`__
  // descl2 = ``quantile``, `Quantile regression <https://en.wikipedia.org/wiki/Quantile_regression>`__
  // descl2 = ``mape``, `MAPE loss <https://en.wikipedia.org/wiki/Mean_absolute_percentage_error>`__, aliases: ``mean_absolute_percentage_error``
  // descl2 = ``gamma``, Gamma regression with log-link. It might be useful, e.g., for modeling insurance claims severity, or for any target that might be `gamma-distributed <https://en.wikipedia.org/wiki/Gamma_distribution#Occurrence_and_applications>`__
  // descl2 = ``tweedie``, Tweedie regression with log-link. It might be useful, e.g., for modeling total loss in insurance, or for any target that might be `tweedie-distributed <https://en.wikipedia.org/wiki/Tweedie_distribution#Occurrence_and_applications>`__
  // desc = binary classification application
  // descl2 = ``binary``, binary `log loss <https://en.wikipedia.org/wiki/Cross_entropy>`__ classification (or logistic regression)
  // descl2 = requires labels in {0, 1}; see ``cross-entropy`` application for general probability labels in [0, 1]
  // desc = multi-class classification application
  // descl2 = ``multiclass``, `softmax <https://en.wikipedia.org/wiki/Softmax_function>`__ objective function, aliases: ``softmax``
  // descl2 = ``multiclassova``, `One-vs-All <https://en.wikipedia.org/wiki/Multiclass_classification#One-vs.-rest>`__ binary objective function, aliases: ``multiclass_ova``, ``ova``, ``ovr``
  // descl2 = ``num_class`` should be set as well
  // desc = cross-entropy application
  // descl2 = ``cross_entropy``, objective function for cross-entropy (with optional linear weights), aliases: ``xentropy``
  // descl2 = ``cross_entropy_lambda``, alternative parameterization of cross-entropy, aliases: ``xentlambda``
  // descl2 = label is anything in interval [0, 1]
  // desc = ranking application
  // descl2 = ``lambdarank``, `lambdarank <https://papers.nips.cc/paper/2971-learning-to-rank-with-nonsmooth-cost-functions.pdf>`__ objective. `label_gain <#label_gain>`__ can be used to set the gain (weight) of ``int`` label and all values in ``label`` must be smaller than number of elements in ``label_gain``
  // descl2 = ``rank_xendcg``, `XE_NDCG_MART <https://arxiv.org/abs/1911.09798>`__ ranking objective function, aliases: ``xendcg``, ``xe_ndcg``, ``xe_ndcg_mart``, ``xendcg_mart``
  // descl2 = ``rank_xendcg`` is faster than ``lambdarank`` and achieves similar performance
  // descl2 = label should be ``int`` type, and larger number represents the higher relevance (e.g. 0:bad, 1:fair, 2:good, 3:perfect)
  std::string objective = "regression";

  // [doc-only]
  // type = enum
  // alias = boosting_type, boost
  // options = gbdt, rf, dart, goss
  // desc = ``gbdt``, traditional Gradient Boosting Decision Tree, aliases: ``gbrt``
  // desc = ``rf``, Random Forest, aliases: ``random_forest``
  // desc = ``dart``, `Dropouts meet Multiple Additive Regression Trees <https://arxiv.org/abs/1505.01866>`__
  // desc = ``goss``, Gradient-based One-Side Sampling
  // descl2 = **Note**: internally, LightGBM uses ``gbdt`` mode for the first ``1 / learning_rate`` iterations
  std::string boosting = "gbdt";

  // alias = train, train_data, train_data_file, data_filename
  // desc = path of training data, LightGBM will train from this data
  // desc = **Note**: can be used only in CLI version
  std::string data = "";

  // alias = test, valid_data, valid_data_file, test_data, test_data_file, valid_filenames
  // default = ""
  // desc = path(s) of validation/test data, LightGBM will output metrics for these data
  // desc = support multiple validation data, separated by ``,``
  // desc = **Note**: can be used only in CLI version
  std::vector<std::string> valid;

  // alias = num_iteration, n_iter, num_tree, num_trees, num_round, num_rounds, nrounds, num_boost_round, n_estimators, max_iter
  // check = >=0
  // desc = number of boosting iterations
  // desc = **Note**: internally, LightGBM constructs ``num_class * num_iterations`` trees for multi-class classification problems
  int num_iterations = 100;

  // alias = shrinkage_rate, eta
  // check = >0.0
  // desc = shrinkage rate
  // desc = in ``dart``, it also affects the normalization weights of dropped trees
  double learning_rate = 0.1;

  // default = 31
  // alias = num_leaf, max_leaves, max_leaf, max_leaf_nodes
  // check = >1
  // check = <=131072
  // desc = max number of leaves in one tree
  int num_leaves = kDefaultNumLeaves;

  // [doc-only]
  // type = enum
  // options = serial, feature, data, voting
  // alias = tree, tree_type, tree_learner_type
  // desc = ``serial``, single machine tree learner
  // desc = ``feature``, feature parallel tree learner, aliases: ``feature_parallel``
  // desc = ``data``, data parallel tree learner, aliases: ``data_parallel``
  // desc = ``voting``, voting parallel tree learner, aliases: ``voting_parallel``
  // desc = refer to `Distributed Learning Guide <./Parallel-Learning-Guide.rst>`__ to get more details
  std::string tree_learner = "serial";

  // alias = num_thread, nthread, nthreads, n_jobs
  // desc = used only in ``train``, ``prediction`` and ``refit`` tasks or in correspondent functions of language-specific packages
  // desc = number of threads for LightGBM
  // desc = ``0`` means default number of threads in OpenMP
  // desc = for the best speed, set this to the number of **real CPU cores**, not the number of threads (most CPUs use `hyper-threading <https://en.wikipedia.org/wiki/Hyper-threading>`__ to generate 2 threads per CPU core)
  // desc = do not set it too large if your dataset is small (for instance, do not use 64 threads for a dataset with 10,000 rows)
  // desc = be aware that a task manager or any similar CPU monitoring tool might report cores not being fully utilized. **This is normal**
  // desc = for distributed learning, do not use all CPU cores because this will cause poor performance for the network communication
  // desc = **Note**: please **don't** change this during training, especially when running multiple jobs simultaneously by external packages, otherwise it may cause undesirable errors
  int num_threads = 0;

  // [doc-only]
  // type = enum
  // options = cpu, gpu, cuda, cuda_exp
  // alias = device
  // desc = device for the tree learning; you can use a GPU to achieve faster learning
  // desc = **Note**: it is recommended to use a smaller ``max_bin`` (e.g. 63) to get a better speedup
  // desc = **Note**: for faster speed, the GPU uses 32-bit floating point numbers for summation by default, so this may affect the accuracy for some tasks. You can set ``gpu_use_dp=true`` to enable 64-bit floating point, but it will slow down the training
  // desc = **Note**: refer to `Installation Guide <./Installation-Guide.rst#build-gpu-version>`__ to build LightGBM with GPU support
  // desc = **Note**: ``cuda_exp`` is an experimental CUDA version, the installation guide for ``cuda_exp`` is identical with ``cuda``
  // desc = **Note**: ``cuda_exp`` is faster than ``cuda`` and will replace ``cuda`` in the future
  std::string device_type = "cpu";

  // [doc-only]
  // alias = random_seed, random_state
  // default = None
  // desc = this seed is used to generate other seeds, e.g. ``data_random_seed``, ``feature_fraction_seed``, etc.
  // desc = by default, this seed is unused in favor of default values of other seeds
  // desc = this seed has lower priority in comparison with other seeds, which means that it will be overridden, if you set other seeds explicitly
  int seed = 0;

  // desc = used only with ``cpu`` device type
  // desc = setting this to ``true`` should ensure stable results when using the same data and the same parameters (but possibly different ``num_threads``)
  // desc = when you use different seeds, different LightGBM versions, binaries compiled by different compilers, or different systems, the results are expected to differ
  // desc = you can `raise issues <https://github.com/microsoft/LightGBM/issues>`__ in the LightGBM GitHub repo when you encounter unstable results
  // desc = **Note**: setting this to ``true`` may slow down the training
  // desc = **Note**: to avoid potential instability due to numerical issues, please set ``force_col_wise=true`` or ``force_row_wise=true`` when setting ``deterministic=true``
  bool deterministic = false;

  #ifndef __NVCC__
  #pragma endregion

  #pragma region Learning Control Parameters
  #endif  // __NVCC__

  // desc = used only with ``cpu`` device type
  // desc = set this to ``true`` to force col-wise histogram building
  // desc = enabling this is recommended when:
  // descl2 = the number of columns is large, or the total number of bins is large
  // descl2 = ``num_threads`` is large, e.g. ``> 20``
  // descl2 = you want to reduce memory cost
  // desc = **Note**: when both ``force_col_wise`` and ``force_row_wise`` are ``false``, LightGBM will first try both of them, and then use the faster one. To remove the testing overhead, set the faster one to ``true`` manually
  // desc = **Note**: this parameter cannot be used at the same time with ``force_row_wise``, choose only one of them
  bool force_col_wise = false;

  // desc = used only with ``cpu`` device type
  // desc = set this to ``true`` to force row-wise histogram building
  // desc = enabling this is recommended when:
  // descl2 = the number of data points is large, and the total number of bins is relatively small
  // descl2 = ``num_threads`` is relatively small, e.g. ``<= 16``
  // descl2 = you want to use small ``bagging_fraction`` or ``goss`` boosting to speed up
  // desc = **Note**: setting this to ``true`` will double the memory cost for the Dataset object. If you do not have enough memory, you can try setting ``force_col_wise=true``
  // desc = **Note**: when both ``force_col_wise`` and ``force_row_wise`` are ``false``, LightGBM will first try both of them, and then use the faster one. To remove the testing overhead, set the faster one to ``true`` manually
  // desc = **Note**: this parameter cannot be used at the same time with ``force_col_wise``, choose only one of them
  bool force_row_wise = false;

  // alias = hist_pool_size
  // desc = max cache size in MB for historical histogram
  // desc = ``< 0`` means no limit
  double histogram_pool_size = -1.0;

  // desc = limit the max depth for tree model. This is used to deal with over-fitting when ``#data`` is small. Tree still grows leaf-wise
  // desc = ``<= 0`` means no limit
  int max_depth = -1;

  // alias = min_data_per_leaf, min_data, min_child_samples, min_samples_leaf
  // check = >=0
  // desc = minimal number of data in one leaf. Can be used to deal with over-fitting
  // desc = **Note**: this is an approximation based on the Hessian, so occasionally you may observe splits which produce leaf nodes that have less than this many observations
  int min_data_in_leaf = 20;

  // alias = min_sum_hessian_per_leaf, min_sum_hessian, min_hessian, min_child_weight
  // check = >=0.0
  // desc = minimal sum hessian in one leaf. Like ``min_data_in_leaf``, it can be used to deal with over-fitting
  double min_sum_hessian_in_leaf = 1e-3;

  // alias = sub_row, subsample, bagging
  // check = >0.0
  // check = <=1.0
  // desc = like ``feature_fraction``, but this will randomly select part of data without resampling
  // desc = can be used to speed up training
  // desc = can be used to deal with over-fitting
  // desc = **Note**: to enable bagging, ``bagging_freq`` should be set to a non-zero value as well
  double bagging_fraction = 1.0;

  // alias = pos_sub_row, pos_subsample, pos_bagging
  // check = >0.0
  // check = <=1.0
  // desc = used only in ``binary`` application
  // desc = used for imbalanced binary classification problem, will randomly sample ``#pos_samples * pos_bagging_fraction`` positive samples in bagging
  // desc = should be used together with ``neg_bagging_fraction``
  // desc = set this to ``1.0`` to disable
  // desc = **Note**: to enable this, you need to set ``bagging_freq`` and ``neg_bagging_fraction`` as well
  // desc = **Note**: if both ``pos_bagging_fraction`` and ``neg_bagging_fraction`` are set to ``1.0``,  balanced bagging is disabled
  // desc = **Note**: if balanced bagging is enabled, ``bagging_fraction`` will be ignored
  double pos_bagging_fraction = 1.0;

  // alias = neg_sub_row, neg_subsample, neg_bagging
  // check = >0.0
  // check = <=1.0
  // desc = used only in ``binary`` application
  // desc = used for imbalanced binary classification problem, will randomly sample ``#neg_samples * neg_bagging_fraction`` negative samples in bagging
  // desc = should be used together with ``pos_bagging_fraction``
  // desc = set this to ``1.0`` to disable
  // desc = **Note**: to enable this, you need to set ``bagging_freq`` and ``pos_bagging_fraction`` as well
  // desc = **Note**: if both ``pos_bagging_fraction`` and ``neg_bagging_fraction`` are set to ``1.0``,  balanced bagging is disabled
  // desc = **Note**: if balanced bagging is enabled, ``bagging_fraction`` will be ignored
  double neg_bagging_fraction = 1.0;

  // alias = subsample_freq
  // desc = frequency for bagging
  // desc = ``0`` means disable bagging; ``k`` means perform bagging at every ``k`` iteration. Every ``k``-th iteration, LightGBM will randomly select ``bagging_fraction * 100 %`` of the data to use for the next ``k`` iterations
  // desc = **Note**: to enable bagging, ``bagging_fraction`` should be set to value smaller than ``1.0`` as well
  int bagging_freq = 0;
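
  // Putting the two bagging parameters together (an illustrative setting, not
  // a recommendation), e.g. in a CLI config file:
  //
  //   bagging_fraction = 0.8
  //   bagging_freq = 5
  //
  // re-samples 80% of the data every 5 iterations and trains the next 5 trees
  // on that sample.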

  // alias = bagging_fraction_seed
  // desc = random seed for bagging
  int bagging_seed = 3;

  // alias = sub_feature, colsample_bytree
  // check = >0.0
  // check = <=1.0
  // desc = LightGBM will randomly select a subset of features on each iteration (tree) if ``feature_fraction`` is smaller than ``1.0``. For example, if you set it to ``0.8``, LightGBM will select 80% of features before training each tree
  // desc = can be used to speed up training
  // desc = can be used to deal with over-fitting
  double feature_fraction = 1.0;

  // alias = sub_feature_bynode, colsample_bynode
  // check = >0.0
  // check = <=1.0
  // desc = LightGBM will randomly select a subset of features on each tree node if ``feature_fraction_bynode`` is smaller than ``1.0``. For example, if you set it to ``0.8``, LightGBM will select 80% of features at each tree node
  // desc = can be used to deal with over-fitting
  // desc = **Note**: unlike ``feature_fraction``, this cannot speed up training
  // desc = **Note**: if both ``feature_fraction`` and ``feature_fraction_bynode`` are smaller than ``1.0``, the final fraction of each node is ``feature_fraction * feature_fraction_bynode``
  double feature_fraction_bynode = 1.0;

  // desc = random seed for ``feature_fraction``
  int feature_fraction_seed = 2;

  // alias = extra_tree
  // desc = use extremely randomized trees
  // desc = if set to ``true``, when evaluating node splits LightGBM will check only one randomly-chosen threshold for each feature
  // desc = can be used to speed up training
  // desc = can be used to deal with over-fitting
  bool extra_trees = false;

  // desc = random seed for selecting thresholds when ``extra_trees`` is true
  int extra_seed = 6;

  // alias = early_stopping_rounds, early_stopping, n_iter_no_change
  // desc = will stop training if one metric of one validation data doesn't improve in the last ``early_stopping_round`` rounds
  // desc = ``<= 0`` means disable
  // desc = can be used to speed up training
  int early_stopping_round = 0;

  // desc = LightGBM allows you to provide multiple evaluation metrics. Set this to ``true`` if you want to use only the first metric for early stopping
  bool first_metric_only = false;

  // alias = max_tree_output, max_leaf_output
  // desc = used to limit the max output of tree leaves
  // desc = ``<= 0`` means no constraint
  // desc = the final max output of leaves is ``learning_rate * max_delta_step``
  double max_delta_step = 0.0;

  // alias = reg_alpha, l1_regularization
  // check = >=0.0
  // desc = L1 regularization
  double lambda_l1 = 0.0;

  // alias = reg_lambda, lambda, l2_regularization
  // check = >=0.0
  // desc = L2 regularization
  double lambda_l2 = 0.0;

  // check = >=0.0
  // desc = linear tree regularization, corresponds to the parameter ``lambda`` in Eq. 3 of `Gradient Boosting with Piece-Wise Linear Regression Trees <https://arxiv.org/pdf/1802.05640.pdf>`__
  double linear_lambda = 0.0;

  // alias = min_split_gain
  // check = >=0.0
  // desc = the minimal gain to perform split
  // desc = can be used to speed up training
  double min_gain_to_split = 0.0;

  // alias = rate_drop
  // check = >=0.0
  // check = <=1.0
  // desc = used only in ``dart``
  // desc = dropout rate: a fraction of previous trees to drop during the dropout
  double drop_rate = 0.1;

  // desc = used only in ``dart``
  // desc = max number of dropped trees during one boosting iteration
  // desc = ``<=0`` means no limit
  int max_drop = 50;

  // check = >=0.0
  // check = <=1.0
  // desc = used only in ``dart``
  // desc = probability of skipping the dropout procedure during a boosting iteration
  double skip_drop = 0.5;

  // desc = used only in ``dart``
  // desc = set this to ``true``, if you want to use xgboost dart mode
  bool xgboost_dart_mode = false;

  // desc = used only in ``dart``
  // desc = set this to ``true``, if you want to use uniform drop
  bool uniform_drop = false;

  // desc = used only in ``dart``
  // desc = random seed to choose dropping models
  int drop_seed = 4;

  // check = >=0.0
  // check = <=1.0
  // desc = used only in ``goss``
  // desc = the retain ratio of large gradient data
  double top_rate = 0.2;

  // check = >=0.0
  // check = <=1.0
  // desc = used only in ``goss``
  // desc = the retain ratio of small gradient data
  double other_rate = 0.1;

  // check = >0
  // desc = minimal number of data per categorical group
  int min_data_per_group = 100;

  // check = >0
  // desc = used for the categorical features
  // desc = limit number of split points considered for categorical features. See `the documentation on how LightGBM finds optimal splits for categorical features <./Features.rst#optimal-split-for-categorical-features>`_ for more details
  // desc = can be used to speed up training
  int max_cat_threshold = 32;

  // check = >=0.0
  // desc = used for the categorical features
  // desc = L2 regularization in categorical split
  double cat_l2 = 10.0;

  // check = >=0.0
  // desc = used for the categorical features
  // desc = this can reduce the effect of noise in categorical features, especially for categories with little data
  double cat_smooth = 10.0;

  // check = >0
  // desc = when the number of categories of one feature is smaller than or equal to ``max_cat_to_onehot``, the one-vs-other split algorithm will be used
  int max_cat_to_onehot = 4;

  // alias = topk
  // check = >0
  // desc = used only in ``voting`` tree learner, refer to `Voting parallel <./Parallel-Learning-Guide.rst#choose-appropriate-parallel-algorithm>`__
  // desc = set this to a larger value for a more accurate result, but it will slow down the training speed
  int top_k = 20;

  // type = multi-int
  // alias = mc, monotone_constraint, monotonic_cst
  // default = None
  // desc = used for constraints of monotonic features
  // desc = ``1`` means increasing, ``-1`` means decreasing, ``0`` means non-constraint
  // desc = you need to specify all features in order. For example, ``mc=-1,0,1`` means decreasing for 1st feature, non-constraint for 2nd feature and increasing for the 3rd feature
  std::vector<int8_t> monotone_constraints;

  // type = enum
  // alias = monotone_constraining_method, mc_method
  // options = basic, intermediate, advanced
  // desc = used only if ``monotone_constraints`` is set
  // desc = monotone constraints method
  // descl2 = ``basic``, the most basic monotone constraints method. It does not slow the library at all, but over-constrains the predictions
  // descl2 = ``intermediate``, a `more advanced method <https://hal.archives-ouvertes.fr/hal-02862802/document>`__, which may slow the library very slightly. However, this method is much less constraining than the basic method and should significantly improve the results
  // descl2 = ``advanced``, an `even more advanced method <https://hal.archives-ouvertes.fr/hal-02862802/document>`__, which may slow the library. However, this method is even less constraining than the intermediate method and should again significantly improve the results
  std::string monotone_constraints_method = "basic";

  // alias = monotone_splits_penalty, ms_penalty, mc_penalty
  // check = >=0.0
  // desc = used only if ``monotone_constraints`` is set
  // desc = `monotone penalty <https://hal.archives-ouvertes.fr/hal-02862802/document>`__: a penalization parameter X forbids any monotone splits on the first X (rounded down) level(s) of the tree. The penalty applied to monotone splits on a given depth is a continuous, increasing function of the penalization parameter
  // desc = if ``0.0`` (the default), no penalization is applied
  // desc = if ``0.0`` (the default), no penalization is applied
  double monotone_penalty = 0.0;

  // type = multi-double
  // alias = feature_contrib, fc, fp, feature_penalty
  // default = None
  // desc = used to control feature's split gain, will use ``gain[i] = max(0, feature_contri[i]) * gain[i]`` to replace the split gain of i-th feature
  // desc = you need to specify all features in order
  std::vector<double> feature_contri;
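
  // A sketch of the gain adjustment above, with hypothetical values: given
  // ``feature_contri = 0.5,1.0,-1.0``, a candidate split on feature 0 has its
  // gain halved, feature 1 is unchanged, and feature 2 is never chosen, since
  // ``max(0, -1.0) * gain`` is always ``0``.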

  // alias = fs, forced_splits_filename, forced_splits_file, forced_splits
  // desc = path to a ``.json`` file that specifies splits to force at the top of every decision tree before best-first learning commences
  // desc = ``.json`` file can be arbitrarily nested, and each split contains ``feature``, ``threshold`` fields, as well as ``left`` and ``right`` fields representing subsplits
  // desc = categorical splits are forced in a one-hot fashion, with ``left`` representing the split containing the feature value and ``right`` representing other values
  // desc = **Note**: the forced split logic will be ignored if the split makes gain worse
  // desc = see `this file <https://github.com/microsoft/LightGBM/tree/master/examples/binary_classification/forced_splits.json>`__ as an example
  std::string forcedsplits_filename = "";
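
  // A minimal illustrative ``.json`` for this parameter (field names follow
  // the description above; the linked example file is authoritative):
  //
  //   {
  //     "feature": 2,
  //     "threshold": 10.5,
  //     "left": {"feature": 5, "threshold": 0.5}
  //   }
  //
  // This forces the root to split on feature 2 at threshold 10.5, and the left
  // child to split on feature 5 at threshold 0.5.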

  // check = >=0.0
  // check = <=1.0
  // desc = decay rate of ``refit`` task, will use ``leaf_output = refit_decay_rate * old_leaf_output + (1.0 - refit_decay_rate) * new_leaf_output`` to refit trees
  // desc = used only in ``refit`` task in CLI version or as argument in ``refit`` function in language-specific package
  double refit_decay_rate = 0.9;
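
  // Worked example of the formula above: with the default
  // ``refit_decay_rate = 0.9``, an old leaf output of 2.0 and a new one of 1.0
  // refit to ``0.9 * 2.0 + 0.1 * 1.0 = 1.9``, i.e. refitting moves leaf
  // outputs only gradually towards the new data.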

  // check = >=0.0
  // desc = cost-effective gradient boosting multiplier for all penalties
  double cegb_tradeoff = 1.0;

  // check = >=0.0
  // desc = cost-effective gradient-boosting penalty for splitting a node
  double cegb_penalty_split = 0.0;

  // type = multi-double
  // default = 0,0,...,0
  // desc = cost-effective gradient boosting penalty for using a feature
  // desc = applied per data point
  std::vector<double> cegb_penalty_feature_lazy;

  // type = multi-double
  // default = 0,0,...,0
  // desc = cost-effective gradient boosting penalty for using a feature
  // desc = applied once per forest
  std::vector<double> cegb_penalty_feature_coupled;

  // check = >= 0.0
  // desc = controls smoothing applied to tree nodes
  // desc = helps prevent overfitting on leaves with few samples
  // desc = if set to zero, no smoothing is applied
  // desc = if ``path_smooth > 0`` then ``min_data_in_leaf`` must be at least ``2``
  // desc = larger values give stronger regularization
  // descl2 = the weight of each node is ``w * (n / path_smooth) / (n / path_smooth + 1) + w_p / (n / path_smooth + 1)``, where ``n`` is the number of samples in the node, ``w`` is the optimal node weight to minimise the loss (approximately ``-sum_gradients / sum_hessians``), and ``w_p`` is the weight of the parent node
  // descl2 = note that the parent output ``w_p`` itself has smoothing applied, unless it is the root node, so that the smoothing effect accumulates with the tree depth
  double path_smooth = 0;
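
  // A sketch of the smoothing formula above, with hypothetical numbers: for
  // ``path_smooth = 10``, a node holding ``n = 40`` samples, raw optimal
  // weight ``w = -0.5`` and parent weight ``w_p = -0.2``:
  //
  //   w_smoothed = w * (n / 10) / (n / 10 + 1) + w_p / (n / 10 + 1)
  //              = -0.5 * 4 / 5 + (-0.2) / 5 = -0.44
  //
  // so leaves with few samples are pulled towards their parent's output.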

  // desc = controls which features can appear in the same branch
  // desc = by default interaction constraints are disabled, to enable them you can specify
  // descl2 = for CLI, lists separated by commas, e.g. ``[0,1,2],[2,3]``
  // descl2 = for Python-package, list of lists, e.g. ``[[0, 1, 2], [2, 3]]``
  // descl2 = for R-package, list of character or numeric vectors, e.g. ``list(c("var1", "var2", "var3"), c("var3", "var4"))`` or ``list(c(1L, 2L, 3L), c(3L, 4L))``. Numeric vectors should use 1-based indexing, where ``1L`` is the first feature, ``2L`` is the second feature, etc
  // desc = any two features can appear in the same branch only if there exists a constraint containing both features
  std::string interaction_constraints = "";

  // alias = verbose
  // desc = controls the level of LightGBM's verbosity
  // desc = ``< 0``: Fatal, ``= 0``: Error (Warning), ``= 1``: Info, ``> 1``: Debug
  int verbosity = 1;

  // [no-save]
  // alias = model_input, model_in
  // desc = filename of input model
  // desc = for ``prediction`` task, this model will be applied to prediction data
  // desc = for ``train`` task, training will be continued from this model
  // desc = **Note**: can be used only in CLI version
  std::string input_model = "";

  // [no-save]
  // alias = model_output, model_out
  // desc = filename of output model in training
  // desc = **Note**: can be used only in CLI version
  std::string output_model = "LightGBM_model.txt";

  // desc = the feature importance type in the saved model file
  // desc = ``0``: count-based feature importance (numbers of splits are counted); ``1``: gain-based feature importance (values of gain are counted)
  // desc = **Note**: can be used only in CLI version
  int saved_feature_importance_type = 0;

  // [no-save]
  // alias = save_period
  // desc = frequency of saving model file snapshot
  // desc = set this to a positive value to enable this function. For example, the model file will be snapshotted at each iteration if ``snapshot_freq=1``
  // desc = **Note**: can be used only in CLI version
  int snapshot_freq = -1;

  #ifndef __NVCC__
  #pragma endregion

  #pragma region IO Parameters

  #pragma region Dataset Parameters
  #endif  // __NVCC__

  // alias = linear_trees
  // desc = fit piecewise linear gradient boosting tree
  // descl2 = tree splits are chosen in the usual way, but the model at each leaf is linear instead of constant
  // descl2 = the linear model at each leaf includes all the numerical features in that leaf's branch
  // descl2 = the first tree has constant leaf values
  // descl2 = categorical features are used for splits as normal but are not used in the linear models
  // descl2 = missing values should not be encoded as ``0``. Use ``np.nan`` for Python, ``NA`` for the CLI, and ``NA``, ``NA_real_``, or ``NA_integer_`` for R
  // descl2 = it is recommended to rescale data before training so that features have similar mean and standard deviation
  // descl2 = **Note**: only works with CPU and ``serial`` tree learner
  // descl2 = **Note**: ``regression_l1`` objective is not supported with linear tree boosting
  // descl2 = **Note**: setting ``linear_tree=true`` significantly increases the memory use of LightGBM
  // descl2 = **Note**: if you specify ``monotone_constraints``, constraints will be enforced when choosing the split points, but not when fitting the linear models on leaves
  bool linear_tree = false;

  // alias = max_bins
  // check = >1
  // desc = max number of bins that feature values will be bucketed in
  // desc = a small number of bins may reduce training accuracy but may improve generalization (deal with over-fitting)
  // desc = LightGBM will auto compress memory according to ``max_bin``. For example, LightGBM will use ``uint8_t`` for feature value if ``max_bin=255``
  int max_bin = 255;

  // type = multi-int
  // default = None
  // desc = max number of bins for each feature
  // desc = if not specified, will use ``max_bin`` for all features
  std::vector<int32_t> max_bin_by_feature;

  // check = >0
  // desc = minimal number of data inside one bin
  // desc = use this to avoid one-data-one-bin (potential over-fitting)
  int min_data_in_bin = 3;

  // alias = subsample_for_bin
  // check = >0
  // desc = number of sampled data points used to construct feature discretization bins
  // desc = setting this to a larger value will give a better training result, but may increase data loading time
  // desc = set this to a larger value if data is very sparse
  // desc = **Note**: don't set this to small values; otherwise, you may encounter unexpected errors and poor accuracy
  int bin_construct_sample_cnt = 200000;

  // alias = data_seed
  // desc = random seed for sampling data to construct histogram bins
  int data_random_seed = 1;

  // alias = is_sparse, enable_sparse, sparse
  // desc = used to enable/disable sparse optimization
  bool is_enable_sparse = true;

  // alias = is_enable_bundle, bundle
  // desc = set this to ``false`` to disable Exclusive Feature Bundling (EFB), which is described in `LightGBM: A Highly Efficient Gradient Boosting Decision Tree <https://papers.nips.cc/paper/6907-lightgbm-a-highly-efficient-gradient-boosting-decision-tree>`__
  // desc = **Note**: disabling this may cause slow training speed for sparse datasets
  bool enable_bundle = true;

  // desc = set this to ``false`` to disable the special handling of missing values
  bool use_missing = true;

  // desc = set this to ``true`` to treat all zeros as missing values (including unshown values in LibSVM / sparse matrices)
  // desc = set this to ``false`` to use ``na`` for representing missing values
  bool zero_as_missing = false;

  // desc = set this to ``true`` (the default) to tell LightGBM to ignore the features that are unsplittable based on ``min_data_in_leaf``
  // desc = since the Dataset object is initialized only once and cannot be changed afterwards, you may need to set this to ``false`` when searching over ``min_data_in_leaf``; otherwise features are filtered by the initial ``min_data_in_leaf`` value if you don't reconstruct the Dataset object
  // desc = **Note**: setting this to ``false`` may slow down the training
  bool feature_pre_filter = true;

  // alias = is_pre_partition
  // desc = used for distributed learning (excluding the ``feature_parallel`` mode)
  // desc = ``true`` if training data are pre-partitioned, and different machines use different partitions
  bool pre_partition = false;

  // alias = two_round_loading, use_two_round_loading
  // desc = set this to ``true`` if data file is too big to fit in memory
  // desc = by default, LightGBM will map the data file to memory and load features from memory. This will provide faster data loading speed, but may cause an out-of-memory error when the data file is very big
  // desc = **Note**: works only in case of loading data directly from text file
  bool two_round = false;

  // alias = has_header
  // desc = set this to ``true`` if input data has header
  // desc = **Note**: works only in case of loading data directly from text file
  bool header = false;

  // type = int or string
  // alias = label
  // desc = used to specify the label column
  // desc = use number for index, e.g. ``label=0`` means column\_0 is the label
  // desc = add a prefix ``name:`` for column name, e.g. ``label=name:is_click``
  // desc = if omitted, the first column in the training data is used as the label
  // desc = **Note**: works only in case of loading data directly from text file
  std::string label_column = "";

  // type = int or string
  // alias = weight
  // desc = used to specify the weight column
  // desc = use number for index, e.g. ``weight=0`` means column\_0 is the weight
  // desc = add a prefix ``name:`` for column name, e.g. ``weight=name:weight``
  // desc = **Note**: works only in case of loading data directly from text file
  // desc = **Note**: index starts from ``0`` and it doesn't count the label column when passing type is ``int``, e.g. when label is column\_0, and weight is column\_1, the correct parameter is ``weight=0``
  // desc = **Note**: weights should be non-negative
  std::string weight_column = "";

  // type = int or string
  // alias = group, group_id, query_column, query, query_id
  // desc = used to specify the query/group id column
  // desc = use number for index, e.g. ``query=0`` means column\_0 is the query id
  // desc = add a prefix ``name:`` for column name, e.g. ``query=name:query_id``
  // desc = **Note**: works only in case of loading data directly from text file
  // desc = **Note**: data should be grouped by query\_id, for more information, see `Query Data <#query-data>`__
  // desc = **Note**: index starts from ``0`` and it doesn't count the label column when passing type is ``int``, e.g. when label is column\_0 and query\_id is column\_1, the correct parameter is ``query=0``
  std::string group_column = "";

  // type = multi-int or string
  // alias = ignore_feature, blacklist
  // desc = used to specify some ignoring columns in training
  // desc = use number for index, e.g. ``ignore_column=0,1,2`` means column\_0, column\_1 and column\_2 will be ignored
  // desc = add a prefix ``name:`` for column name, e.g. ``ignore_column=name:c1,c2,c3`` means c1, c2 and c3 will be ignored
  // desc = **Note**: works only in case of loading data directly from text file
  // desc = **Note**: index starts from ``0`` and it doesn't count the label column when passing type is ``int``
  // desc = **Note**: despite the fact that specified columns will be completely ignored during the training, they should still have a valid format allowing LightGBM to load the file successfully
  std::string ignore_column = "";

  // type = multi-int or string
  // alias = cat_feature, categorical_column, cat_column, categorical_features
  // desc = used to specify categorical features
  // desc = use number for index, e.g. ``categorical_feature=0,1,2`` means column\_0, column\_1 and column\_2 are categorical features
  // desc = add a prefix ``name:`` for column name, e.g. ``categorical_feature=name:c1,c2,c3`` means c1, c2 and c3 are categorical features
  // desc = **Note**: all values will be cast to ``int32`` (integer codes will be extracted from pandas categoricals in the Python-package)
  // desc = **Note**: index starts from ``0`` and it doesn't count the label column when passing type is ``int``
  // desc = **Note**: all values should be less than ``Int32.MaxValue`` (2147483647)
  // desc = **Note**: using large values could be memory consuming. Tree decision rule works best when categorical features are presented by consecutive integers starting from zero
  // desc = **Note**: all negative values will be treated as **missing values**
  // desc = **Note**: the output cannot be monotonically constrained with respect to a categorical feature
  // desc = **Note**: floating point numbers in categorical features will be rounded towards 0
  std::string categorical_feature = "";

  // desc = path to a ``.json`` file that specifies bin upper bounds for some or all features
  // desc = ``.json`` file should contain an array of objects, each containing the word ``feature`` (integer feature index) and ``bin_upper_bound`` (array of thresholds for binning)
  // desc = see `this file <https://github.com/microsoft/LightGBM/tree/master/examples/regression/forced_bins.json>`__ as an example
  std::string forcedbins_filename = "";
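
  // A minimal illustrative ``.json`` for this parameter, matching the format
  // described above (the linked example file is authoritative):
  //
  //   [
  //     {"feature": 0, "bin_upper_bound": [0.1, 0.5, 0.9]},
  //     {"feature": 3, "bin_upper_bound": [10.0, 20.0]}
  //   ]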

  // [no-save]
  // alias = is_save_binary, is_save_binary_file
  // desc = if ``true``, LightGBM will save the dataset (including validation data) to a binary file. This speeds up data loading the next time
  // desc = **Note**: ``init_score`` is not saved in binary file
  // desc = **Note**: can be used only in CLI version; for language-specific packages you can use the correspondent function
  bool save_binary = false;

  // desc = use precise floating point number parsing for the text parser (e.g. CSV, TSV, LibSVM input)
  // desc = **Note**: setting this to ``true`` may lead to much slower text parsing
  bool precise_float_parser = false;

  // desc = path to a ``.json`` file that specifies customized parser initialized configuration
  // desc = see `lightgbm-transform <https://github.com/microsoft/lightgbm-transform>`__ for usage examples
  // desc = **Note**: ``lightgbm-transform`` is not maintained by LightGBM's maintainers. Bug reports or feature requests should go to `issues page <https://github.com/microsoft/lightgbm-transform/issues>`__
  std::string parser_config_file = "";

  #ifndef __NVCC__
  #pragma endregion

  #pragma region Predict Parameters
  #endif  // __NVCC__

  // [no-save]
  // desc = used only in ``prediction`` task
  // desc = used to specify from which iteration to start the prediction
  // desc = ``<= 0`` means from the first iteration
  int start_iteration_predict = 0;

  // [no-save]
  // desc = used only in ``prediction`` task
  // desc = used to specify how many trained iterations will be used in prediction
  // desc = ``<= 0`` means no limit
  int num_iteration_predict = -1;

  // [no-save]
  // alias = is_predict_raw_score, predict_rawscore, raw_score
  // desc = used only in ``prediction`` task
  // desc = set this to ``true`` to predict only the raw scores
  // desc = set this to ``false`` to predict transformed scores
  bool predict_raw_score = false;

  // [no-save]
  // alias = is_predict_leaf_index, leaf_index
  // desc = used only in ``prediction`` task
  // desc = set this to ``true`` to predict with leaf index of all trees
  bool predict_leaf_index = false;

  // [no-save]
  // alias = is_predict_contrib, contrib
  // desc = used only in ``prediction`` task
  // desc = set this to ``true`` to estimate `SHAP values <https://arxiv.org/abs/1706.06060>`__, which represent how each feature contributes to each prediction
  // desc = produces ``#features + 1`` values where the last value is the expected value of the model output over the training data
  // desc = **Note**: if you want to get more explanation for your model's predictions using SHAP values like SHAP interaction values, you can install `shap package <https://github.com/slundberg/shap>`__
  // desc = **Note**: unlike the shap package, with ``predict_contrib`` we return a matrix with an extra column, where the last column is the expected value
  // desc = **Note**: this feature is not implemented for linear trees
  bool predict_contrib = false;

  // [no-save]
  // desc = used only in ``prediction`` task
  // desc = control whether or not LightGBM raises an error when you try to predict on data with a different number of features than the training data
  // desc = if ``false`` (the default), a fatal error will be raised if the number of features in the dataset you predict on differs from the number seen during training
  // desc = if ``true``, LightGBM will attempt to predict on whatever data you provide. This is dangerous because you might get incorrect predictions, but you could use it in situations where it is difficult or expensive to generate some features and you are very confident that they were never chosen for splits in the model
  // desc = **Note**: be very careful setting this parameter to ``true``
  bool predict_disable_shape_check = false;

  // [no-save]
  // desc = used only in ``prediction`` task
  // desc = used only in ``classification`` and ``ranking`` applications
  // desc = used only for predicting normal or raw scores
  // desc = if ``true``, will use early-stopping to speed up the prediction. May affect the accuracy
  // desc = **Note**: cannot be used with ``rf`` boosting type or custom objective function
  bool pred_early_stop = false;

  // [no-save]
  // desc = used only in ``prediction`` task
  // desc = the frequency of checking early-stopping prediction
  int pred_early_stop_freq = 10;

  // [no-save]
  // desc = used only in ``prediction`` task
  // desc = the threshold of margin in early-stopping prediction
  double pred_early_stop_margin = 10.0;

  // [no-save]
  // alias = predict_result, prediction_result, predict_name, prediction_name, pred_name, name_pred
  // desc = used only in ``prediction`` task
  // desc = filename of prediction result
  // desc = **Note**: can be used only in CLI version
  std::string output_result = "LightGBM_predict_result.txt";

  #ifndef __NVCC__
  #pragma endregion

  #pragma region Convert Parameters
  #endif  // __NVCC__

  // [no-save]
  // desc = used only in ``convert_model`` task
  // desc = only ``cpp`` is supported so far; to convert a model to other languages, consider using the `m2cgen <https://github.com/BayesWitnesses/m2cgen>`__ utility
  // desc = if ``convert_model_language`` is set and ``task=train``, the model will also be converted
  // desc = **Note**: can be used only in CLI version
  std::string convert_model_language = "";

  // [no-save]
  // alias = convert_model_file
  // desc = used only in ``convert_model`` task
  // desc = output filename of converted model
  // desc = **Note**: can be used only in CLI version
  std::string convert_model = "gbdt_prediction.cpp";

  #ifndef __NVCC__
  #pragma endregion

  #pragma endregion

  #pragma region Objective Parameters
  #endif  // __NVCC__

  // desc = used only in ``rank_xendcg`` objective
  // desc = random seed for objectives, if random process is needed
  int objective_seed = 5;

  // check = >0
  // alias = num_classes
  // desc = used only in ``multi-class`` classification application
  int num_class = 1;

  // alias = unbalance, unbalanced_sets
  // desc = used only in ``binary`` and ``multiclassova`` applications
  // desc = set this to ``true`` if training data are unbalanced
  // desc = **Note**: while enabling this should increase the overall performance metric of your model, it will also result in poor estimates of the individual class probabilities
  // desc = **Note**: this parameter cannot be used at the same time with ``scale_pos_weight``, choose only **one** of them
  bool is_unbalance = false;

  // check = >0.0
  // desc = used only in ``binary`` and ``multiclassova`` applications
  // desc = weight of labels with positive class
  // desc = **Note**: while enabling this should increase the overall performance metric of your model, it will also result in poor estimates of the individual class probabilities
  // desc = **Note**: this parameter cannot be used at the same time with ``is_unbalance``, choose only **one** of them
  double scale_pos_weight = 1.0;

  // check = >0.0
  // desc = used only in ``binary`` and ``multiclassova`` classification and in ``lambdarank`` applications
  // desc = parameter for the sigmoid function
  double sigmoid = 1.0;

  // desc = used only in ``regression``, ``binary``, ``multiclassova`` and ``cross-entropy`` applications
  // desc = adjusts initial score to the mean of labels for faster convergence
Guolin Ke's avatar
Guolin Ke committed
880
881
  bool boost_from_average = true;

  // desc = used only in ``regression`` application
  // desc = used to fit ``sqrt(label)`` instead of original values; the prediction result will also be automatically converted back via ``prediction^2``
  // desc = might be useful in case of large-range labels
  bool reg_sqrt = false;

  // check = >0.0
  // desc = used only in ``huber`` and ``quantile`` ``regression`` applications
  // desc = parameter for `Huber loss <https://en.wikipedia.org/wiki/Huber_loss>`__ and `Quantile regression <https://en.wikipedia.org/wiki/Quantile_regression>`__
  double alpha = 0.9;

  // check = >0.0
  // desc = used only in ``fair`` ``regression`` application
  // desc = parameter for `Fair loss <https://www.kaggle.com/c/allstate-claims-severity/discussion/24520>`__
  double fair_c = 1.0;

  // check = >0.0
  // desc = used only in ``poisson`` ``regression`` application
  // desc = parameter for `Poisson regression <https://en.wikipedia.org/wiki/Poisson_regression>`__ to safeguard optimization
  double poisson_max_delta_step = 0.7;

  // check = >=1.0
  // check = <2.0
  // desc = used only in ``tweedie`` ``regression`` application
  // desc = used to control the variance of the tweedie distribution
  // desc = set this closer to ``2`` to shift towards a **Gamma** distribution
  // desc = set this closer to ``1`` to shift towards a **Poisson** distribution
  double tweedie_variance_power = 1.5;
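
  // Example (illustrative config snippet; the value is hypothetical): a Tweedie
  // regression shifted towards the Poisson end of the family
  //
  //   objective = tweedie
  //   tweedie_variance_power = 1.1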

  // check = >0
  // desc = used only in ``lambdarank`` application
  // desc = controls the number of top-results to focus on during training, refer to "truncation level" in Sec. 3 of the `LambdaMART paper <https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/MSR-TR-2010-82.pdf>`__
  // desc = this parameter is closely related to the desirable cutoff ``k`` in the metric **NDCG@k** that we aim to optimize the ranker for. The optimal setting for this parameter is likely to be slightly higher than ``k`` (e.g., ``k + 3``) to include more pairs of documents to train on, but perhaps not too high to avoid deviating too much from the desired target metric **NDCG@k**
  int lambdarank_truncation_level = 30;

  // desc = used only in ``lambdarank`` application
  // desc = set this to ``true`` to normalize the lambdas for different queries, and improve the performance for unbalanced data
  // desc = set this to ``false`` to enforce the original lambdarank algorithm
  bool lambdarank_norm = true;

  // type = multi-double
  // default = 0,1,3,7,15,31,63,...,2^30-1
  // desc = used only in ``lambdarank`` application
  // desc = relevant gain for labels. For example, the gain of label ``2`` is ``3`` in case of default label gains
  // desc = separate by ``,``
  std::vector<double> label_gain;
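
  // Example (illustrative config snippet): spelling out the default gains for labels 0..4
  //
  //   label_gain = 0,1,3,7,15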

  #ifndef __NVCC__
  #pragma endregion

  #pragma region Metric Parameters
  #endif  // __NVCC__

  // [doc-only]
  // alias = metrics, metric_types
  // default = ""
  // type = multi-enum
  // desc = metric(s) to be evaluated on the evaluation set(s)
  // descl2 = ``""`` (empty string or not specified) means that metric corresponding to specified ``objective`` will be used (this is possible only for pre-defined objective functions, otherwise no evaluation metric will be added)
  // descl2 = ``"None"`` (string, **not** a ``None`` value) means that no metric will be registered, aliases: ``na``, ``null``, ``custom``
  // descl2 = ``l1``, absolute loss, aliases: ``mean_absolute_error``, ``mae``, ``regression_l1``
  // descl2 = ``l2``, square loss, aliases: ``mean_squared_error``, ``mse``, ``regression_l2``, ``regression``
  // descl2 = ``rmse``, root square loss, aliases: ``root_mean_squared_error``, ``l2_root``
  // descl2 = ``quantile``, `Quantile regression <https://en.wikipedia.org/wiki/Quantile_regression>`__
  // descl2 = ``mape``, `MAPE loss <https://en.wikipedia.org/wiki/Mean_absolute_percentage_error>`__, aliases: ``mean_absolute_percentage_error``
  // descl2 = ``huber``, `Huber loss <https://en.wikipedia.org/wiki/Huber_loss>`__
  // descl2 = ``fair``, `Fair loss <https://www.kaggle.com/c/allstate-claims-severity/discussion/24520>`__
  // descl2 = ``poisson``, negative log-likelihood for `Poisson regression <https://en.wikipedia.org/wiki/Poisson_regression>`__
  // descl2 = ``gamma``, negative log-likelihood for **Gamma** regression
  // descl2 = ``gamma_deviance``, residual deviance for **Gamma** regression
  // descl2 = ``tweedie``, negative log-likelihood for **Tweedie** regression
  // descl2 = ``ndcg``, `NDCG <https://en.wikipedia.org/wiki/Discounted_cumulative_gain#Normalized_DCG>`__, aliases: ``lambdarank``, ``rank_xendcg``, ``xendcg``, ``xe_ndcg``, ``xe_ndcg_mart``, ``xendcg_mart``
  // descl2 = ``map``, `MAP <https://makarandtapaswi.wordpress.com/2012/07/02/intuition-behind-average-precision-and-map/>`__, aliases: ``mean_average_precision``
  // descl2 = ``auc``, `AUC <https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve>`__
  // descl2 = ``average_precision``, `average precision score <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.average_precision_score.html>`__
  // descl2 = ``binary_logloss``, `log loss <https://en.wikipedia.org/wiki/Cross_entropy>`__, aliases: ``binary``
  // descl2 = ``binary_error``, for one sample: ``0`` for correct classification, ``1`` for incorrect classification
  // descl2 = ``auc_mu``, `AUC-mu <http://proceedings.mlr.press/v97/kleiman19a/kleiman19a.pdf>`__
  // descl2 = ``multi_logloss``, log loss for multi-class classification, aliases: ``multiclass``, ``softmax``, ``multiclassova``, ``multiclass_ova``, ``ova``, ``ovr``
  // descl2 = ``multi_error``, error rate for multi-class classification
  // descl2 = ``cross_entropy``, cross-entropy (with optional linear weights), aliases: ``xentropy``
  // descl2 = ``cross_entropy_lambda``, "intensity-weighted" cross-entropy, aliases: ``xentlambda``
  // descl2 = ``kullback_leibler``, `Kullback-Leibler divergence <https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence>`__, aliases: ``kldiv``
  // desc = support multiple metrics, separated by ``,``
  std::vector<std::string> metric;
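
  // Example (illustrative config snippet): evaluating two metrics at once
  //
  //   metric = auc,binary_logloss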

  // [no-save]
  // check = >0
  // alias = output_freq
  // desc = frequency for metric output
  // desc = **Note**: can be used only in CLI version
  int metric_freq = 1;

  // [no-save]
  // alias = training_metric, is_training_metric, train_metric
  // desc = set this to ``true`` to output metric result over training dataset
  // desc = **Note**: can be used only in CLI version
  bool is_provide_training_metric = false;

  // type = multi-int
  // default = 1,2,3,4,5
  // alias = ndcg_eval_at, ndcg_at, map_eval_at, map_at
  // desc = used only with ``ndcg`` and ``map`` metrics
  // desc = `NDCG <https://en.wikipedia.org/wiki/Discounted_cumulative_gain#Normalized_DCG>`__ and `MAP <https://makarandtapaswi.wordpress.com/2012/07/02/intuition-behind-average-precision-and-map/>`__ evaluation positions, separated by ``,``
  std::vector<int> eval_at;
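
  // Example (illustrative config snippet): evaluate NDCG at positions 1, 3 and 5
  //
  //   metric = ndcg
  //   eval_at = 1,3,5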

  // check = >0
  // desc = used only with ``multi_error`` metric
  // desc = threshold for top-k multi-error metric
  // desc = the error on each sample is ``0`` if the true class is among the top ``multi_error_top_k`` predictions, and ``1`` otherwise
  // descl2 = more precisely, the error on a sample is ``0`` if there are at least ``num_classes - multi_error_top_k`` predictions strictly less than the prediction on the true class
  // desc = when ``multi_error_top_k=1`` this is equivalent to the usual multi-error metric
  int multi_error_top_k = 1;
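
  // Worked example (illustrative): with ``num_class = 3``, ``multi_error_top_k = 2`` and
  // predicted scores ``(0.2, 0.5, 0.3)``, samples whose true class is ``1`` or ``2`` score
  // error ``0`` (their prediction is among the top-2), while true class ``0`` scores error ``1``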

  // type = multi-double
  // default = None
  // desc = used only with ``auc_mu`` metric
  // desc = list representing flattened matrix (in row-major order) giving loss weights for classification errors
  // desc = list should have ``n * n`` elements, where ``n`` is the number of classes
  // desc = the matrix co-ordinate ``[i, j]`` should correspond to the ``i * n + j``-th element of the list
  // desc = if not specified, will use equal weights for all classes
  std::vector<double> auc_mu_weights;
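
  // Example (illustrative config snippet): for ``n = 3`` classes, a zero-diagonal
  // equal-weight matrix flattened in row-major order
  //
  //   auc_mu_weights = 0,1,1,1,0,1,1,1,0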

  #ifndef __NVCC__
  #pragma endregion

  #pragma region Network Parameters
  #endif  // __NVCC__

  // check = >0
  // alias = num_machine
  // desc = the number of machines for distributed learning application
  // desc = this parameter needs to be set in both **socket** and **mpi** versions
  int num_machines = 1;

  // check = >0
  // default = 12400 (random for Dask-package)
  // alias = local_port, port
  // desc = TCP listen port for local machines
  // desc = **Note**: don't forget to allow this port in firewall settings before training
  int local_listen_port = 12400;

  // check = >0
  // desc = socket time-out in minutes
  int time_out = 120;

  // alias = machine_list_file, machine_list, mlist
  // desc = path of file that lists machines for this distributed learning application
  // desc = each line contains one IP and one port for one machine. The format is ``ip port`` (space as a separator)
  // desc = **Note**: can be used only in CLI version
  std::string machine_list_filename = "";
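
  // Example (illustrative): a machine list file for two workers; IPs are hypothetical
  //
  //   192.168.0.1 12400
  //   192.168.0.2 12400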

  // alias = workers, nodes
  // desc = list of machines in the following format: ``ip1:port1,ip2:port2``
  std::string machines = "";

  #ifndef __NVCC__
  #pragma endregion

  #pragma region GPU Parameters
  #endif  // __NVCC__

  // desc = OpenCL platform ID. Usually each GPU vendor exposes one OpenCL platform
  // desc = ``-1`` means the system-wide default platform
  // desc = **Note**: refer to `GPU Targets <./GPU-Targets.rst#query-opencl-devices-in-your-system>`__ for more details
  int gpu_platform_id = -1;

  // desc = OpenCL device ID in the specified platform. Each GPU in the selected platform has a unique device ID
  // desc = ``-1`` means the default device in the selected platform
  // desc = **Note**: refer to `GPU Targets <./GPU-Targets.rst#query-opencl-devices-in-your-system>`__ for more details
  int gpu_device_id = -1;

  // desc = set this to ``true`` to use double precision math on GPU (by default single precision is used)
  // desc = **Note**: can be used only in OpenCL implementation, in CUDA implementation only double precision is currently supported
  bool gpu_use_dp = false;

  // check = >0
  // desc = number of GPUs
  // desc = **Note**: can be used only in CUDA implementation
  int num_gpu = 1;
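
  // Example (illustrative config snippet; ``device_type`` is assumed from the
  // Learning Control parameters section): training on the second device of the
  // first OpenCL platform
  //
  //   device_type = gpu
  //   gpu_platform_id = 0
  //   gpu_device_id = 1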

  #ifndef __NVCC__
  #pragma endregion

  #pragma endregion
  #endif  // __NVCC__

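  // interval, in bytes, between progress log messages while loading data from file (10 GiB by default; inferred from the field name and usage, not documented upstream)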
  size_t file_load_progress_interval_bytes = size_t(10) * 1024 * 1024 * 1024;

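  // internal flags derived from other parameters (e.g. ``tree_learner``) rather than set directly by users (an inference from their usage, not documented upstream)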
  bool is_parallel = false;
  bool is_data_based_parallel = false;
  LIGHTGBM_EXPORT void Set(const std::unordered_map<std::string, std::string>& params);
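
  // Example (illustrative usage; the parameter values are hypothetical):
  //
  //   std::unordered_map<std::string, std::string> params = {
  //       {"objective", "binary"}, {"metric", "auc"}};
  //   Config config;
  //   config.Set(params);
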
  static const std::unordered_map<std::string, std::string>& alias_table();
  static const std::unordered_map<std::string, std::vector<std::string>>& parameter2aliases();
  static const std::unordered_set<std::string>& parameter_set();
  std::vector<std::vector<double>> auc_mu_weights_matrix;
  std::vector<std::vector<int>> interaction_constraints_vector;
  static const std::string DumpAliases();

 private:
  void CheckParamConflict();
  void GetMembersFromString(const std::unordered_map<std::string, std::string>& params);
  std::string SaveMembersToString() const;
  void GetAucMuWeights();
  void GetInteractionConstraints();
};

inline bool Config::GetString(
  const std::unordered_map<std::string, std::string>& params,
  const std::string& name, std::string* out) {
  if (params.count(name) > 0 && !params.at(name).empty()) {
    *out = params.at(name);
    return true;
  }
  return false;
}

inline bool Config::GetInt(
  const std::unordered_map<std::string, std::string>& params,
  const std::string& name, int* out) {
  if (params.count(name) > 0 && !params.at(name).empty()) {
    if (!Common::AtoiAndCheck(params.at(name).c_str(), out)) {
      Log::Fatal("Parameter %s should be of type int, got \"%s\"",
                 name.c_str(), params.at(name).c_str());
    }
    return true;
  }
  return false;
}

inline bool Config::GetDouble(
  const std::unordered_map<std::string, std::string>& params,
  const std::string& name, double* out) {
  if (params.count(name) > 0 && !params.at(name).empty()) {
    if (!Common::AtofAndCheck(params.at(name).c_str(), out)) {
      Log::Fatal("Parameter %s should be of type double, got \"%s\"",
                 name.c_str(), params.at(name).c_str());
    }
    return true;
  }
  return false;
}

inline bool Config::GetBool(
  const std::unordered_map<std::string, std::string>& params,
  const std::string& name, bool* out) {
  if (params.count(name) > 0 && !params.at(name).empty()) {
    std::string value = params.at(name);
    std::transform(value.begin(), value.end(), value.begin(), Common::tolower);
    if (value == std::string("false") || value == std::string("-")) {
      *out = false;
    } else if (value == std::string("true") || value == std::string("+")) {
      *out = true;
    } else {
      Log::Fatal("Parameter %s should be \"true\"/\"+\" or \"false\"/\"-\", got \"%s\"",
                 name.c_str(), params.at(name).c_str());
    }
    return true;
  }
  return false;
}

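// returns true when alias ``x`` takes precedence over alias ``y``:
// shorter names first, ties broken lexicographically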
inline bool Config::SortAlias(const std::string& x, const std::string& y) {
  return x.size() < y.size() || (x.size() == y.size() && x < y);
}

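// Example (illustrative; ``num_iteration`` is assumed to be a registered alias of
// ``num_iterations``): given ``{"num_iteration": "100"}``, KeyAliasTransform() rewrites
// the key to its canonical parameter name, yielding ``{"num_iterations": "100"}``;
// if several aliases of one parameter are passed, only the highest-precedence one
// is kept and the rest are ignored with a warning.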
struct ParameterAlias {
  static void KeyAliasTransform(std::unordered_map<std::string, std::string>* params) {
    std::unordered_map<std::string, std::string> tmp_map;
    for (const auto& pair : *params) {
      auto alias = Config::alias_table().find(pair.first);
      if (alias != Config::alias_table().end()) {  // found alias
        auto alias_set = tmp_map.find(alias->second);
        if (alias_set != tmp_map.end()) {  // alias already set
          if (Config::SortAlias(alias_set->second, pair.first)) {
            Log::Warning("%s is set with %s=%s, %s=%s will be ignored. Current value: %s=%s",
                         alias->second.c_str(), alias_set->second.c_str(), params->at(alias_set->second).c_str(),
                         pair.first.c_str(), pair.second.c_str(), alias->second.c_str(), params->at(alias_set->second).c_str());
          } else {
            Log::Warning("%s is set with %s=%s, will be overridden by %s=%s. Current value: %s=%s",
                         alias->second.c_str(), alias_set->second.c_str(), params->at(alias_set->second).c_str(),
                         pair.first.c_str(), pair.second.c_str(), alias->second.c_str(), pair.second.c_str());
            tmp_map[alias->second] = pair.first;
          }
        } else {  // alias not set
          tmp_map.emplace(alias->second, pair.first);
        }
      } else if (Config::parameter_set().find(pair.first) == Config::parameter_set().end()) {
        Log::Warning("Unknown parameter: %s", pair.first.c_str());
      }
    }
    for (const auto& pair : tmp_map) {
      auto alias = params->find(pair.first);
      if (alias == params->end()) {  // not found
        params->emplace(pair.first, params->at(pair.second));
        params->erase(pair.second);
      } else {
        Log::Warning("%s is set=%s, %s=%s will be ignored. Current value: %s=%s",
                     pair.first.c_str(), alias->second.c_str(), pair.second.c_str(), params->at(pair.second).c_str(),
                     pair.first.c_str(), alias->second.c_str());
      }
    }
  }
};

inline std::string ParseObjectiveAlias(const std::string& type) {
  if (type == std::string("regression") || type == std::string("regression_l2")
    || type == std::string("mean_squared_error") || type == std::string("mse") || type == std::string("l2")
    || type == std::string("l2_root") || type == std::string("root_mean_squared_error") || type == std::string("rmse")) {
    return "regression";
  } else if (type == std::string("regression_l1") || type == std::string("mean_absolute_error")
    || type == std::string("l1") || type == std::string("mae")) {
    return "regression_l1";
  } else if (type == std::string("multiclass") || type == std::string("softmax")) {
    return "multiclass";
  } else if (type == std::string("multiclassova") || type == std::string("multiclass_ova") || type == std::string("ova") || type == std::string("ovr")) {
    return "multiclassova";
  } else if (type == std::string("xentropy") || type == std::string("cross_entropy")) {
    return "cross_entropy";
  } else if (type == std::string("xentlambda") || type == std::string("cross_entropy_lambda")) {
    return "cross_entropy_lambda";
  } else if (type == std::string("mean_absolute_percentage_error") || type == std::string("mape")) {
    return "mape";
  } else if (type == std::string("rank_xendcg") || type == std::string("xendcg") || type == std::string("xe_ndcg")
             || type == std::string("xe_ndcg_mart") || type == std::string("xendcg_mart")) {
    return "rank_xendcg";
  } else if (type == std::string("none") || type == std::string("null") || type == std::string("custom") || type == std::string("na")) {
    return "custom";
  }
  return type;
}

inline std::string ParseMetricAlias(const std::string& type) {
  if (type == std::string("regression") || type == std::string("regression_l2") || type == std::string("l2") || type == std::string("mean_squared_error") || type == std::string("mse")) {
    return "l2";
  } else if (type == std::string("l2_root") || type == std::string("root_mean_squared_error") || type == std::string("rmse")) {
    return "rmse";
  } else if (type == std::string("regression_l1") || type == std::string("l1") || type == std::string("mean_absolute_error") || type == std::string("mae")) {
    return "l1";
  } else if (type == std::string("binary_logloss") || type == std::string("binary")) {
    return "binary_logloss";
  } else if (type == std::string("ndcg") || type == std::string("lambdarank") || type == std::string("rank_xendcg")
             || type == std::string("xendcg") || type == std::string("xe_ndcg") || type == std::string("xe_ndcg_mart") || type == std::string("xendcg_mart")) {
    return "ndcg";
  } else if (type == std::string("map") || type == std::string("mean_average_precision")) {
    return "map";
  } else if (type == std::string("multi_logloss") || type == std::string("multiclass") || type == std::string("softmax") || type == std::string("multiclassova") || type == std::string("multiclass_ova") || type == std::string("ova") || type == std::string("ovr")) {
    return "multi_logloss";
  } else if (type == std::string("xentropy") || type == std::string("cross_entropy")) {
    return "cross_entropy";
  } else if (type == std::string("xentlambda") || type == std::string("cross_entropy_lambda")) {
    return "cross_entropy_lambda";
  } else if (type == std::string("kldiv") || type == std::string("kullback_leibler")) {
    return "kullback_leibler";
  } else if (type == std::string("mean_absolute_percentage_error") || type == std::string("mape")) {
    return "mape";
  } else if (type == std::string("none") || type == std::string("null") || type == std::string("custom") || type == std::string("na")) {
    return "custom";
  }
  return type;
}
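
// Examples (from the branches above): ParseObjectiveAlias("mse") returns "regression",
// while ParseMetricAlias("mse") returns "l2"; names with no known alias are returned unchanged.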

}   // namespace LightGBM

#endif   // LIGHTGBM_CONFIG_H_