/*!
 * Copyright (c) 2016 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for license information.
 *
 * \note
 * - desc and descl2 fields must be written in reStructuredText format;
 * - nested sections can be placed only at the bottom of parent's section;
 * - [doc-only] tag indicates that only documentation for this param should be generated and all other actions are performed manually;
 * - [no-save] tag indicates that this param should not be saved into a model text representation.
 */
#ifndef LIGHTGBM_CONFIG_H_
#define LIGHTGBM_CONFIG_H_

#include <LightGBM/export.h>
#include <LightGBM/meta.h>
#include <LightGBM/utils/common.h>
#include <LightGBM/utils/log.h>

#include <string>
#include <algorithm>
#include <memory>
#include <unordered_map>
#include <unordered_set>
#include <vector>

namespace LightGBM {

/*! \brief Types of tasks */
enum TaskType {
  kTrain, kPredict, kConvertModel, KRefitTree, kSaveBinary
};
const int kDefaultNumLeaves = 31;

struct Config {
 public:
  std::string ToString() const;
  /*!
  * \brief Get string value for a given key
  * \param params Map storing the key-value pairs of parameters
  * \param name Name of the key
  * \param out Value will be assigned to out if the key exists
  * \return True if the key exists
  */
  inline static bool GetString(
    const std::unordered_map<std::string, std::string>& params,
    const std::string& name, std::string* out);

  /*!
  * \brief Get int value for a given key
  * \param params Map storing the key-value pairs of parameters
  * \param name Name of the key
  * \param out Value will be assigned to out if the key exists
  * \return True if the key exists
  */
  inline static bool GetInt(
    const std::unordered_map<std::string, std::string>& params,
    const std::string& name, int* out);

  /*!
  * \brief Get double value for a given key
  * \param params Map storing the key-value pairs of parameters
  * \param name Name of the key
  * \param out Value will be assigned to out if the key exists
  * \return True if the key exists
  */
  inline static bool GetDouble(
    const std::unordered_map<std::string, std::string>& params,
    const std::string& name, double* out);

  /*!
  * \brief Get bool value for a given key
  * \param params Map storing the key-value pairs of parameters
  * \param name Name of the key
  * \param out Value will be assigned to out if the key exists
  * \return True if the key exists
  */
  inline static bool GetBool(
    const std::unordered_map<std::string, std::string>& params,
    const std::string& name, bool* out);
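
  // Usage sketch (added for illustration; not part of the upstream header).
  // All four getters share one pattern: look up ``name`` in ``params`` and
  // assign to ``*out`` only when the key exists, e.g.:
  //
  //   std::unordered_map<std::string, std::string> params = {
  //       {"max_depth", "5"}, {"feature_fraction", "0.8"}};
  //   int max_depth = -1;
  //   if (Config::GetInt(params, "max_depth", &max_depth)) {
  //     // max_depth is now 5; absent keys leave *out untouched
  //   }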

  /*!
  * \brief Sort aliases by length and then alphabetically
  * \param x Alias 1
  * \param y Alias 2
  * \return true if x has higher priority than y
  */
  inline static bool SortAlias(const std::string& x, const std::string& y);
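
  // Illustration (added; not upstream text): among the ``num_iterations``
  // aliases listed below, ``n_iter`` outranks ``num_tree`` because it is
  // shorter, while the equal-length ``max_iter`` outranks ``num_tree``
  // because it sorts earlier alphabetically.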

  static void KeepFirstValues(const std::unordered_map<std::string, std::vector<std::string>>& params, std::unordered_map<std::string, std::string>* out);
  static void KV2Map(std::unordered_map<std::string, std::vector<std::string>>* params, const char* kv);
  static void SetVerbosity(const std::unordered_map<std::string, std::vector<std::string>>& params);
  static std::unordered_map<std::string, std::string> Str2Map(const char* parameters);
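
  // Usage sketch (added for illustration, assuming the documented CLI format
  // of space-separated ``key=value`` pairs):
  //
  //   auto params = Config::Str2Map("task=train num_leaves=63");
  //   int num_leaves = kDefaultNumLeaves;
  //   Config::GetInt(params, "num_leaves", &num_leaves);  // num_leaves == 63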

  #ifndef __NVCC__
  #pragma region Parameters

  #pragma region Core Parameters
  #endif  // __NVCC__

  // [no-save]
  // [doc-only]
  // alias = config_file
  // desc = path of config file
  // desc = **Note**: can be used only in CLI version
  std::string config = "";

  // [no-save]
  // [doc-only]
  // type = enum
  // default = train
  // options = train, predict, convert_model, refit
  // alias = task_type
  // desc = ``train``, for training, aliases: ``training``
  // desc = ``predict``, for prediction, aliases: ``prediction``, ``test``
  // desc = ``convert_model``, for converting model file into if-else format, see more information in `Convert Parameters <#convert-parameters>`__
  // desc = ``refit``, for refitting existing models with new data, aliases: ``refit_tree``
  // desc = ``save_binary``, load train (and validation) data then save dataset to binary file. Typical usage: ``save_binary`` first, then run multiple ``train`` tasks in parallel using the saved binary file
  // desc = **Note**: can be used only in CLI version; for language-specific packages you can use the correspondent functions
  TaskType task = TaskType::kTrain;

  // [doc-only]
  // type = enum
  // options = regression, regression_l1, huber, fair, poisson, quantile, mape, gamma, tweedie, binary, multiclass, multiclassova, cross_entropy, cross_entropy_lambda, lambdarank, rank_xendcg
  // alias = objective_type, app, application, loss
  // desc = regression application
  // descl2 = ``regression``, L2 loss, aliases: ``regression_l2``, ``l2``, ``mean_squared_error``, ``mse``, ``l2_root``, ``root_mean_squared_error``, ``rmse``
  // descl2 = ``regression_l1``, L1 loss, aliases: ``l1``, ``mean_absolute_error``, ``mae``
  // descl2 = ``huber``, `Huber loss <https://en.wikipedia.org/wiki/Huber_loss>`__
  // descl2 = ``fair``, `Fair loss <https://www.kaggle.com/c/allstate-claims-severity/discussion/24520>`__
  // descl2 = ``poisson``, `Poisson regression <https://en.wikipedia.org/wiki/Poisson_regression>`__
  // descl2 = ``quantile``, `Quantile regression <https://en.wikipedia.org/wiki/Quantile_regression>`__
  // descl2 = ``mape``, `MAPE loss <https://en.wikipedia.org/wiki/Mean_absolute_percentage_error>`__, aliases: ``mean_absolute_percentage_error``
  // descl2 = ``gamma``, Gamma regression with log-link. It might be useful, e.g., for modeling insurance claims severity, or for any target that might be `gamma-distributed <https://en.wikipedia.org/wiki/Gamma_distribution#Occurrence_and_applications>`__
  // descl2 = ``tweedie``, Tweedie regression with log-link. It might be useful, e.g., for modeling total loss in insurance, or for any target that might be `tweedie-distributed <https://en.wikipedia.org/wiki/Tweedie_distribution#Occurrence_and_applications>`__
  // desc = binary classification application
  // descl2 = ``binary``, binary `log loss <https://en.wikipedia.org/wiki/Cross_entropy>`__ classification (or logistic regression)
  // descl2 = requires labels in {0, 1}; see ``cross-entropy`` application for general probability labels in [0, 1]
  // desc = multi-class classification application
  // descl2 = ``multiclass``, `softmax <https://en.wikipedia.org/wiki/Softmax_function>`__ objective function, aliases: ``softmax``
  // descl2 = ``multiclassova``, `One-vs-All <https://en.wikipedia.org/wiki/Multiclass_classification#One-vs.-rest>`__ binary objective function, aliases: ``multiclass_ova``, ``ova``, ``ovr``
  // descl2 = ``num_class`` should be set as well
  // desc = cross-entropy application
  // descl2 = ``cross_entropy``, objective function for cross-entropy (with optional linear weights), aliases: ``xentropy``
  // descl2 = ``cross_entropy_lambda``, alternative parameterization of cross-entropy, aliases: ``xentlambda``
  // descl2 = label is anything in interval [0, 1]
  // desc = ranking application
  // descl2 = ``lambdarank``, `lambdarank <https://papers.nips.cc/paper/2971-learning-to-rank-with-nonsmooth-cost-functions.pdf>`__ objective. `label_gain <#label_gain>`__ can be used to set the gain (weight) of ``int`` label and all values in ``label`` must be smaller than number of elements in ``label_gain``
  // descl2 = ``rank_xendcg``, `XE_NDCG_MART <https://arxiv.org/abs/1911.09798>`__ ranking objective function, aliases: ``xendcg``, ``xe_ndcg``, ``xe_ndcg_mart``, ``xendcg_mart``
  // descl2 = ``rank_xendcg`` is faster than ``lambdarank`` and achieves similar performance
  // descl2 = label should be of ``int`` type, and larger numbers represent higher relevance (e.g. 0:bad, 1:fair, 2:good, 3:perfect)
  std::string objective = "regression";

  // [doc-only]
  // type = enum
  // alias = boosting_type, boost
  // options = gbdt, rf, dart
  // desc = ``gbdt``, traditional Gradient Boosting Decision Tree, aliases: ``gbrt``
  // desc = ``rf``, Random Forest, aliases: ``random_forest``
  // desc = ``dart``, `Dropouts meet Multiple Additive Regression Trees <https://arxiv.org/abs/1505.01866>`__
  // descl2 = **Note**: internally, LightGBM uses ``gbdt`` mode for the first ``1 / learning_rate`` iterations
  std::string boosting = "gbdt";

  // [doc-only]
  // type = enum
  // options = bagging, goss
  // desc = ``bagging``, Randomly Bagging Sampling
  // descl2 = **Note**: ``bagging`` is only effective when ``bagging_freq > 0`` and ``bagging_fraction < 1.0``
  // desc = ``goss``, Gradient-based One-Side Sampling
  // desc = *New in 4.0.0*
  std::string data_sample_strategy = "bagging";

  // alias = train, train_data, train_data_file, data_filename
  // desc = path of training data, LightGBM will train from this data
  // desc = **Note**: can be used only in CLI version
  std::string data = "";

  // alias = test, valid_data, valid_data_file, test_data, test_data_file, valid_filenames
  // default = ""
  // desc = path(s) of validation/test data, LightGBM will output metrics for these data
  // desc = support multiple validation data, separated by ``,``
  // desc = **Note**: can be used only in CLI version
  std::vector<std::string> valid;

  // alias = num_iteration, n_iter, num_tree, num_trees, num_round, num_rounds, nrounds, num_boost_round, n_estimators, max_iter
  // check = >=0
  // desc = number of boosting iterations
  // desc = **Note**: internally, LightGBM constructs ``num_class * num_iterations`` trees for multi-class classification problems
  int num_iterations = 100;

  // alias = shrinkage_rate, eta
  // check = >0.0
  // desc = shrinkage rate
  // desc = in ``dart``, it also affects the normalization weights of dropped trees
  double learning_rate = 0.1;

  // default = 31
  // alias = num_leaf, max_leaves, max_leaf, max_leaf_nodes
  // check = >1
  // check = <=131072
  // desc = max number of leaves in one tree
  int num_leaves = kDefaultNumLeaves;

  // [doc-only]
  // type = enum
  // options = serial, feature, data, voting
  // alias = tree, tree_type, tree_learner_type
  // desc = ``serial``, single machine tree learner
  // desc = ``feature``, feature parallel tree learner, aliases: ``feature_parallel``
  // desc = ``data``, data parallel tree learner, aliases: ``data_parallel``
  // desc = ``voting``, voting parallel tree learner, aliases: ``voting_parallel``
  // desc = refer to `Distributed Learning Guide <./Parallel-Learning-Guide.rst>`__ to get more details
  std::string tree_learner = "serial";

  // alias = num_thread, nthread, nthreads, n_jobs
  // desc = used only in ``train``, ``prediction`` and ``refit`` tasks or in correspondent functions of language-specific packages
  // desc = number of threads for LightGBM
  // desc = ``0`` means default number of threads in OpenMP
  // desc = for the best speed, set this to the number of **real CPU cores**, not the number of threads (most CPUs use `hyper-threading <https://en.wikipedia.org/wiki/Hyper-threading>`__ to generate 2 threads per CPU core)
  // desc = do not set it too large if your dataset is small (for instance, do not use 64 threads for a dataset with 10,000 rows)
  // desc = be aware that a task manager or any similar CPU monitoring tool might report cores not being fully utilized. **This is normal**
  // desc = for distributed learning, do not use all CPU cores because this will cause poor performance for the network communication
  // desc = **Note**: please **don't** change this during training, especially when running multiple jobs simultaneously by external packages, otherwise it may cause undesirable errors
  int num_threads = 0;

  // [doc-only]
  // type = enum
  // options = cpu, gpu, cuda
  // alias = device
  // desc = device for the tree learning
  // desc = ``cpu`` supports all LightGBM functionality and is portable across the widest range of operating systems and hardware
  // desc = ``cuda`` offers faster training than ``gpu`` or ``cpu``, but only works on GPUs supporting CUDA
  // desc = ``gpu`` can be faster than ``cpu`` and works on a wider range of GPUs than CUDA
  // desc = **Note**: it is recommended to use a smaller ``max_bin`` (e.g. 63) to get a better speedup
  // desc = **Note**: for faster speed, GPU uses 32-bit floating point to sum up by default, so this may affect the accuracy for some tasks. You can set ``gpu_use_dp=true`` to enable 64-bit floating point, but it will slow down the training
  // desc = **Note**: refer to `Installation Guide <./Installation-Guide.rst#build-gpu-version>`__ to build LightGBM with GPU support
  std::string device_type = "cpu";

  // [doc-only]
  // alias = random_seed, random_state
  // default = None
  // desc = this seed is used to generate other seeds, e.g. ``data_random_seed``, ``feature_fraction_seed``, etc.
  // desc = by default, this seed is unused in favor of default values of other seeds
  // desc = this seed has lower priority in comparison with other seeds, which means that it will be overridden, if you set other seeds explicitly
  int seed = 0;

  // desc = used only with ``cpu`` device type
  // desc = setting this to ``true`` should ensure stable results when using the same data and the same parameters (even with different ``num_threads``)
  // desc = when you use different seeds, different LightGBM versions, binaries compiled by different compilers, or different systems, the results are expected to differ
  // desc = you can `raise issues <https://github.com/microsoft/LightGBM/issues>`__ in LightGBM GitHub repo when you encounter unstable results
  // desc = **Note**: setting this to ``true`` may slow down the training
  // desc = **Note**: to avoid potential instability due to numerical issues, please set ``force_col_wise=true`` or ``force_row_wise=true`` when setting ``deterministic=true``
  bool deterministic = false;

  #ifndef __NVCC__
  #pragma endregion

  #pragma region Learning Control Parameters
  #endif  // __NVCC__

  // desc = used only with ``cpu`` device type
  // desc = set this to ``true`` to force col-wise histogram building
  // desc = enabling this is recommended when:
  // descl2 = the number of columns is large, or the total number of bins is large
  // descl2 = ``num_threads`` is large, e.g. ``> 20``
  // descl2 = you want to reduce memory cost
  // desc = **Note**: when both ``force_col_wise`` and ``force_row_wise`` are ``false``, LightGBM will firstly try them both, and then use the faster one. To remove the overhead of testing, set the faster one to ``true`` manually
  // desc = **Note**: this parameter cannot be used at the same time with ``force_row_wise``, choose only one of them
  bool force_col_wise = false;

  // desc = used only with ``cpu`` device type
  // desc = set this to ``true`` to force row-wise histogram building
  // desc = enabling this is recommended when:
  // descl2 = the number of data points is large, and the total number of bins is relatively small
  // descl2 = ``num_threads`` is relatively small, e.g. ``<= 16``
  // descl2 = you want to use small ``bagging_fraction`` or ``goss`` sample strategy to speed up
  // desc = **Note**: setting this to ``true`` will double the memory cost for the Dataset object. If you do not have enough memory, you can try setting ``force_col_wise=true``
  // desc = **Note**: when both ``force_col_wise`` and ``force_row_wise`` are ``false``, LightGBM will firstly try them both, and then use the faster one. To remove the overhead of testing set the faster one to ``true`` manually
  // desc = **Note**: this parameter cannot be used at the same time with ``force_col_wise``, choose only one of them
  bool force_row_wise = false;

  // alias = hist_pool_size
  // desc = max cache size in MB for historical histogram
  // desc = ``< 0`` means no limit
  double histogram_pool_size = -1.0;

  // desc = limit the max depth for tree model. This is used to deal with over-fitting when ``#data`` is small. Tree still grows leaf-wise
  // desc = ``<= 0`` means no limit
  int max_depth = -1;

  // alias = min_data_per_leaf, min_data, min_child_samples, min_samples_leaf
  // check = >=0
  // desc = minimal number of data in one leaf. Can be used to deal with over-fitting
  // desc = **Note**: this is an approximation based on the Hessian, so occasionally you may observe splits which produce leaf nodes that have less than this many observations
  int min_data_in_leaf = 20;

  // alias = min_sum_hessian_per_leaf, min_sum_hessian, min_hessian, min_child_weight
  // check = >=0.0
  // desc = minimal sum hessian in one leaf. Like ``min_data_in_leaf``, it can be used to deal with over-fitting
  double min_sum_hessian_in_leaf = 1e-3;

  // alias = sub_row, subsample, bagging
  // check = >0.0
  // check = <=1.0
  // desc = like ``feature_fraction``, but this will randomly select part of data without resampling
  // desc = can be used to speed up training
  // desc = can be used to deal with over-fitting
  // desc = **Note**: to enable bagging, ``bagging_freq`` should be set to a non-zero value as well
  double bagging_fraction = 1.0;

  // alias = pos_sub_row, pos_subsample, pos_bagging
  // check = >0.0
  // check = <=1.0
  // desc = used only in ``binary`` application
  // desc = used for imbalanced binary classification problem, will randomly sample ``#pos_samples * pos_bagging_fraction`` positive samples in bagging
  // desc = should be used together with ``neg_bagging_fraction``
  // desc = set this to ``1.0`` to disable
  // desc = **Note**: to enable this, you need to set ``bagging_freq`` and ``neg_bagging_fraction`` as well
  // desc = **Note**: if both ``pos_bagging_fraction`` and ``neg_bagging_fraction`` are set to ``1.0``, balanced bagging is disabled
  // desc = **Note**: if balanced bagging is enabled, ``bagging_fraction`` will be ignored
  double pos_bagging_fraction = 1.0;

  // alias = neg_sub_row, neg_subsample, neg_bagging
  // check = >0.0
  // check = <=1.0
  // desc = used only in ``binary`` application
  // desc = used for imbalanced binary classification problem, will randomly sample ``#neg_samples * neg_bagging_fraction`` negative samples in bagging
  // desc = should be used together with ``pos_bagging_fraction``
  // desc = set this to ``1.0`` to disable
  // desc = **Note**: to enable this, you need to set ``bagging_freq`` and ``pos_bagging_fraction`` as well
  // desc = **Note**: if both ``pos_bagging_fraction`` and ``neg_bagging_fraction`` are set to ``1.0``, balanced bagging is disabled
  // desc = **Note**: if balanced bagging is enabled, ``bagging_fraction`` will be ignored
  double neg_bagging_fraction = 1.0;
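
  // Worked example (added for illustration): ``pos_bagging_fraction=0.2``,
  // ``neg_bagging_fraction=1.0`` and ``bagging_freq=1`` re-sample, at every
  // iteration, a random 20% of the positive samples together with all of the
  // negative samples; ``bagging_fraction`` is then ignored.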

  // alias = subsample_freq
  // desc = frequency for bagging
  // desc = ``0`` means disable bagging; ``k`` means perform bagging at every ``k``-th iteration. At every ``k``-th iteration, LightGBM will randomly select ``bagging_fraction * 100 %`` of the data to use for the next ``k`` iterations
  // desc = **Note**: bagging is only effective when ``0.0 < bagging_fraction < 1.0``
  int bagging_freq = 0;
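
  // Worked example (added for illustration): ``bagging_fraction=0.8`` with
  // ``bagging_freq=5`` selects a random 80% of the data once every 5
  // iterations and trains the next 5 trees on that subset; with the default
  // ``bagging_freq=0``, bagging stays disabled regardless of ``bagging_fraction``.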

  // alias = bagging_fraction_seed
  // desc = random seed for bagging
  int bagging_seed = 3;

  // alias = sub_feature, colsample_bytree
  // check = >0.0
  // check = <=1.0
  // desc = LightGBM will randomly select a subset of features on each iteration (tree) if ``feature_fraction`` is smaller than ``1.0``. For example, if you set it to ``0.8``, LightGBM will select 80% of features before training each tree
  // desc = can be used to speed up training
  // desc = can be used to deal with over-fitting
  double feature_fraction = 1.0;

  // alias = sub_feature_bynode, colsample_bynode
  // check = >0.0
  // check = <=1.0
  // desc = LightGBM will randomly select a subset of features on each tree node if ``feature_fraction_bynode`` is smaller than ``1.0``. For example, if you set it to ``0.8``, LightGBM will select 80% of features at each tree node
  // desc = can be used to deal with over-fitting
  // desc = **Note**: unlike ``feature_fraction``, this cannot speed up training
  // desc = **Note**: if both ``feature_fraction`` and ``feature_fraction_bynode`` are smaller than ``1.0``, the final fraction of each node is ``feature_fraction * feature_fraction_bynode``
  double feature_fraction_bynode = 1.0;
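
  // Worked example (added for illustration): with ``feature_fraction=0.8``
  // and ``feature_fraction_bynode=0.5``, each tree samples 80% of the
  // features and each node then samples half of those, so a node considers
  // ``0.8 * 0.5 = 0.4`` (40%) of all features.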

  // desc = random seed for ``feature_fraction``
  int feature_fraction_seed = 2;

  // alias = extra_tree
  // desc = use extremely randomized trees
  // desc = if set to ``true``, when evaluating node splits LightGBM will check only one randomly-chosen threshold for each feature
  // desc = can be used to speed up training
  // desc = can be used to deal with over-fitting
  bool extra_trees = false;

  // desc = random seed for selecting thresholds when ``extra_trees`` is true
  int extra_seed = 6;

  // alias = early_stopping_rounds, early_stopping, n_iter_no_change
  // desc = will stop training if one metric of one validation data doesn't improve in last ``early_stopping_round`` rounds
  // desc = ``<= 0`` means disable
  // desc = can be used to speed up training
  int early_stopping_round = 0;

  // desc = LightGBM allows you to provide multiple evaluation metrics. Set this to ``true``, if you want to use only the first metric for early stopping
  bool first_metric_only = false;

  // alias = max_tree_output, max_leaf_output
  // desc = used to limit the max output of tree leaves
  // desc = ``<= 0`` means no constraint
  // desc = the final max output of leaves is ``learning_rate * max_delta_step``
  double max_delta_step = 0.0;

  // alias = reg_alpha, l1_regularization
  // check = >=0.0
  // desc = L1 regularization
  double lambda_l1 = 0.0;

  // alias = reg_lambda, lambda, l2_regularization
  // check = >=0.0
  // desc = L2 regularization
  double lambda_l2 = 0.0;

  // check = >=0.0
  // desc = linear tree regularization, corresponds to the parameter ``lambda`` in Eq. 3 of `Gradient Boosting with Piece-Wise Linear Regression Trees <https://arxiv.org/pdf/1802.05640.pdf>`__
  double linear_lambda = 0.0;

  // alias = min_split_gain
  // check = >=0.0
  // desc = the minimal gain to perform split
  // desc = can be used to speed up training
  double min_gain_to_split = 0.0;

  // alias = rate_drop
  // check = >=0.0
  // check = <=1.0
  // desc = used only in ``dart``
  // desc = dropout rate: a fraction of previous trees to drop during the dropout
  double drop_rate = 0.1;

  // desc = used only in ``dart``
  // desc = max number of dropped trees during one boosting iteration
  // desc = ``<=0`` means no limit
  int max_drop = 50;

  // check = >=0.0
  // check = <=1.0
  // desc = used only in ``dart``
  // desc = probability of skipping the dropout procedure during a boosting iteration
  double skip_drop = 0.5;

  // desc = used only in ``dart``
  // desc = set this to ``true``, if you want to use xgboost dart mode
  bool xgboost_dart_mode = false;

  // desc = used only in ``dart``
  // desc = set this to ``true``, if you want to use uniform drop
  bool uniform_drop = false;

  // desc = used only in ``dart``
  // desc = random seed to choose dropping models
  int drop_seed = 4;

  // check = >=0.0
  // check = <=1.0
  // desc = used only in ``goss``
  // desc = the retain ratio of large gradient data
  double top_rate = 0.2;

  // check = >=0.0
  // check = <=1.0
  // desc = used only in ``goss``
  // desc = the retain ratio of small gradient data
  double other_rate = 0.1;

  // check = >0
  // desc = minimal number of data per categorical group
  int min_data_per_group = 100;

  // check = >0
  // desc = used for the categorical features
  // desc = limit number of split points considered for categorical features. See `the documentation on how LightGBM finds optimal splits for categorical features <./Features.rst#optimal-split-for-categorical-features>`_ for more details
  // desc = can be used to speed up training
  int max_cat_threshold = 32;

  // check = >=0.0
  // desc = used for the categorical features
  // desc = L2 regularization in categorical split
  double cat_l2 = 10.0;

  // check = >=0.0
  // desc = used for the categorical features
  // desc = this can reduce the effect of noises in categorical features, especially for categories with few data
  double cat_smooth = 10.0;

  // check = >0
  // desc = when the number of categories of one feature is smaller than or equal to ``max_cat_to_onehot``, the one-vs-other split algorithm will be used
  int max_cat_to_onehot = 4;

  // alias = topk
  // check = >0
  // desc = used only in ``voting`` tree learner, refer to `Voting parallel <./Parallel-Learning-Guide.rst#choose-appropriate-parallel-algorithm>`__
  // desc = set this to a larger value for more accurate results, but it will slow down the training speed
  int top_k = 20;

  // type = multi-int
  // alias = mc, monotone_constraint, monotonic_cst
  // default = None
  // desc = used for constraints of monotonic features
  // desc = ``1`` means increasing, ``-1`` means decreasing, ``0`` means non-constraint
  // desc = you need to specify all features in order. For example, ``mc=-1,0,1`` means decreasing for 1st feature, non-constraint for 2nd feature and increasing for the 3rd feature
  std::vector<int8_t> monotone_constraints;

  // type = enum
  // alias = monotone_constraining_method, mc_method
  // options = basic, intermediate, advanced
  // desc = used only if ``monotone_constraints`` is set
  // desc = monotone constraints method
  // descl2 = ``basic``, the most basic monotone constraints method. It does not slow the library at all, but over-constrains the predictions
  // descl2 = ``intermediate``, a `more advanced method <https://hal.archives-ouvertes.fr/hal-02862802/document>`__, which may slow the library very slightly. However, this method is much less constraining than the basic method and should significantly improve the results
  // descl2 = ``advanced``, an `even more advanced method <https://hal.archives-ouvertes.fr/hal-02862802/document>`__, which may slow the library. However, this method is even less constraining than the intermediate method and should again significantly improve the results
  std::string monotone_constraints_method = "basic";

  // alias = monotone_splits_penalty, ms_penalty, mc_penalty
  // check = >=0.0
  // desc = used only if ``monotone_constraints`` is set
  // desc = `monotone penalty <https://hal.archives-ouvertes.fr/hal-02862802/document>`__: a penalization parameter X forbids any monotone splits on the first X (rounded down) level(s) of the tree. The penalty applied to monotone splits on a given depth is a continuous, increasing function of the penalization parameter
  // desc = if ``0.0`` (the default), no penalization is applied
  double monotone_penalty = 0.0;

  // type = multi-double
  // alias = feature_contrib, fc, fp, feature_penalty
  // default = None
  // desc = used to control feature's split gain, will use ``gain[i] = max(0, feature_contri[i]) * gain[i]`` to replace the split gain of i-th feature
  // desc = you need to specify all features in order
  std::vector<double> feature_contri;
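
  // Worked example (added for illustration): ``feature_contri=0.5,0,1``
  // halves the split gain of the 1st feature, zeroes the gain of the 2nd
  // feature (so it is never chosen), and leaves the 3rd feature unchanged,
  // following ``gain[i] = max(0, feature_contri[i]) * gain[i]``.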

  // alias = fs, forced_splits_filename, forced_splits_file, forced_splits
  // desc = path to a ``.json`` file that specifies splits to force at the top of every decision tree before best-first learning commences
  // desc = ``.json`` file can be arbitrarily nested, and each split contains ``feature``, ``threshold`` fields, as well as ``left`` and ``right`` fields representing subsplits
  // desc = categorical splits are forced in a one-hot fashion, with ``left`` representing the split containing the feature value and ``right`` representing other values
  // desc = **Note**: the forced split logic will be ignored, if the split makes gain worse
  // desc = see `this file <https://github.com/microsoft/LightGBM/tree/master/examples/binary_classification/forced_splits.json>`__ as an example
  std::string forcedsplits_filename = "";

  // check = >=0.0
  // check = <=1.0
  // desc = decay rate of ``refit`` task, will use ``leaf_output = refit_decay_rate * old_leaf_output + (1.0 - refit_decay_rate) * new_leaf_output`` to refit trees
  // desc = used only in ``refit`` task in CLI version or as argument in ``refit`` function in language-specific package
  double refit_decay_rate = 0.9;

  // check = >=0.0
  // desc = cost-effective gradient boosting multiplier for all penalties
  double cegb_tradeoff = 1.0;

  // check = >=0.0
  // desc = cost-effective gradient-boosting penalty for splitting a node
  double cegb_penalty_split = 0.0;

  // type = multi-double
  // default = 0,0,...,0
  // desc = cost-effective gradient boosting penalty for using a feature
  // desc = applied per data point
  std::vector<double> cegb_penalty_feature_lazy;

  // type = multi-double
  // default = 0,0,...,0
  // desc = cost-effective gradient boosting penalty for using a feature
  // desc = applied once per forest
  std::vector<double> cegb_penalty_feature_coupled;

  // check = >= 0.0
  // desc = controls smoothing applied to tree nodes
  // desc = helps prevent overfitting on leaves with few samples
  // desc = if set to zero, no smoothing is applied
  // desc = if ``path_smooth > 0`` then ``min_data_in_leaf`` must be at least ``2``
  // desc = larger values give stronger regularization
  // descl2 = the weight of each node is ``w * (n / path_smooth) / (n / path_smooth + 1) + w_p / (n / path_smooth + 1)``, where ``n`` is the number of samples in the node, ``w`` is the optimal node weight to minimise the loss (approximately ``-sum_gradients / sum_hessians``), and ``w_p`` is the weight of the parent node
  // descl2 = note that the parent output ``w_p`` itself has smoothing applied, unless it is the root node, so that the smoothing effect accumulates with the tree depth
  double path_smooth = 0;
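
  // Worked example (added for illustration): with ``path_smooth=2`` and
  // ``n=4`` samples in a node, ``n / path_smooth = 2``, so the smoothed
  // weight is ``w * 2/3 + w_p * 1/3``, i.e. one third of the parent's weight
  // is blended in; as ``n`` grows, the node's own estimate ``w`` dominates.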

  // desc = controls which features can appear in the same branch
  // desc = by default interaction constraints are disabled, to enable them you can specify
  // descl2 = for CLI, lists separated by commas, e.g. ``[0,1,2],[2,3]``
  // descl2 = for Python-package, list of lists, e.g. ``[[0, 1, 2], [2, 3]]``
  // descl2 = for R-package, list of character or numeric vectors, e.g. ``list(c("var1", "var2", "var3"), c("var3", "var4"))`` or ``list(c(1L, 2L, 3L), c(3L, 4L))``. Numeric vectors should use 1-based indexing, where ``1L`` is the first feature, ``2L`` is the second feature, etc
  // desc = any two features can appear in the same branch only if there exists a constraint containing both features
  std::string interaction_constraints = "";

  // alias = verbose
  // desc = controls the level of LightGBM's verbosity
  // desc = ``< 0``: Fatal, ``= 0``: Error (Warning), ``= 1``: Info, ``> 1``: Debug
  int verbosity = 1;

  // [no-save]
  // alias = model_input, model_in
  // desc = filename of input model
  // desc = for ``prediction`` task, this model will be applied to prediction data
  // desc = for ``train`` task, training will be continued from this model
  // desc = **Note**: can be used only in CLI version
  std::string input_model = "";

  // [no-save]
  // alias = model_output, model_out
  // desc = filename of output model in training
  // desc = **Note**: can be used only in CLI version
  std::string output_model = "LightGBM_model.txt";

  // desc = the feature importance type in the saved model file
  // desc = ``0``: count-based feature importance (numbers of splits are counted); ``1``: gain-based feature importance (values of gain are counted)
  // desc = **Note**: can be used only in CLI version
  int saved_feature_importance_type = 0;

  // [no-save]
  // alias = save_period
  // desc = frequency of saving model file snapshot
  // desc = set this to positive value to enable this function. For example, the model file will be snapshotted at each iteration if ``snapshot_freq=1``
  // desc = **Note**: can be used only in CLI version
  int snapshot_freq = -1;

  // [no-save]
  // desc = whether to use gradient quantization when training
  // desc = enabling this will discretize (quantize) the gradients and hessians into bins of ``num_grad_quant_bins``
  // desc = with quantized training, most arithmetic in the training process will use integer operations
  // desc = gradient quantization can accelerate training, with little accuracy drop in most cases
  // desc = **Note**: can be used only with ``device_type = cpu``
  // desc = *New in version 4.0.0*
  bool use_quantized_grad = false;

  // [no-save]
  // desc = number of bins used to quantize gradients and hessians
  // desc = with more bins, the quantized training will be closer to full precision training
  // desc = **Note**: can be used only with ``device_type = cpu``
  // desc = *New in 4.0.0*
  int num_grad_quant_bins = 4;

  // [no-save]
  // desc = whether to renew the leaf values with original gradients when using quantized training
  // desc = renewing is very helpful for good quantized training accuracy for ranking objectives
  // desc = **Note**: can be used only with ``device_type = cpu``
  // desc = *New in 4.0.0*
  bool quant_train_renew_leaf = false;

  // [no-save]
  // desc = whether to use stochastic rounding in gradient quantization
  // desc = *New in 4.0.0*
  bool stochastic_rounding = true;

  #ifndef __NVCC__
  #pragma endregion

  #pragma region IO Parameters

  #pragma region Dataset Parameters
  #endif  // __NVCC__

  // alias = linear_trees
  // desc = fit piecewise linear gradient boosting tree
  // descl2 = tree splits are chosen in the usual way, but the model at each leaf is linear instead of constant
  // descl2 = the linear model at each leaf includes all the numerical features in that leaf's branch
  // descl2 = the first tree has constant leaf values
  // descl2 = categorical features are used for splits as normal but are not used in the linear models
  // descl2 = missing values should not be encoded as ``0``. Use ``np.nan`` for Python, ``NA`` for the CLI, and ``NA``, ``NA_real_``, or ``NA_integer_`` for R
  // descl2 = it is recommended to rescale data before training so that features have similar mean and standard deviation
  // descl2 = **Note**: only works with CPU and ``serial`` tree learner
  // descl2 = **Note**: ``regression_l1`` objective is not supported with linear tree boosting
  // descl2 = **Note**: setting ``linear_tree=true`` significantly increases the memory use of LightGBM
  // descl2 = **Note**: if you specify ``monotone_constraints``, constraints will be enforced when choosing the split points, but not when fitting the linear models on leaves
  bool linear_tree = false;

  // alias = max_bins
  // check = >1
  // desc = max number of bins that feature values will be bucketed in
  // desc = small number of bins may reduce training accuracy but may increase general power (deal with over-fitting)
  // desc = LightGBM will auto compress memory according to ``max_bin``. For example, LightGBM will use ``uint8_t`` for feature value if ``max_bin=255``
  int max_bin = 255;

  // type = multi-int
  // default = None
  // desc = max number of bins for each feature
  // desc = if not specified, will use ``max_bin`` for all features
  std::vector<int32_t> max_bin_by_feature;

  // check = >0
  // desc = minimal number of data inside one bin
  // desc = use this to avoid one-data-one-bin (potential over-fitting)
  int min_data_in_bin = 3;

  // alias = subsample_for_bin
  // check = >0
  // desc = number of data points sampled to construct feature discrete bins
  // desc = setting this to a larger value will give a better training result, but may increase data loading time
  // desc = set this to a larger value if data is very sparse
  // desc = **Note**: don't set this to small values, otherwise, you may encounter unexpected errors and poor accuracy
  int bin_construct_sample_cnt = 200000;

  // alias = data_seed
  // desc = random seed for sampling data to construct histogram bins
  int data_random_seed = 1;

  // alias = is_sparse, enable_sparse, sparse
  // desc = used to enable/disable sparse optimization
  bool is_enable_sparse = true;

  // alias = is_enable_bundle, bundle
  // desc = set this to ``false`` to disable Exclusive Feature Bundling (EFB), which is described in `LightGBM: A Highly Efficient Gradient Boosting Decision Tree <https://papers.nips.cc/paper/6907-lightgbm-a-highly-efficient-gradient-boosting-decision-tree>`__
  // desc = **Note**: disabling this may cause slow training speed for sparse datasets
  bool enable_bundle = true;

  // desc = set this to ``false`` to disable the special handle of missing value
  bool use_missing = true;

  // desc = set this to ``true`` to treat all zero as missing values (including the unshown values in LibSVM / sparse matrices)
  // desc = set this to ``false`` to use ``na`` for representing missing values
  bool zero_as_missing = false;

  // desc = set this to ``true`` (the default) to tell LightGBM to ignore the features that are unsplittable based on ``min_data_in_leaf``
  // desc = as the dataset object is initialized only once and cannot be changed afterwards, you may need to set this to ``false`` when tuning ``min_data_in_leaf``; otherwise, features are first filtered by the initial ``min_data_in_leaf`` value unless you reconstruct the dataset object
  // desc = **Note**: setting this to ``false`` may slow down the training
  bool feature_pre_filter = true;

  // alias = is_pre_partition
  // desc = used for distributed learning (excluding the ``feature_parallel`` mode)
  // desc = ``true`` if training data are pre-partitioned, and different machines use different partitions
  bool pre_partition = false;

  // alias = two_round_loading, use_two_round_loading
  // desc = set this to ``true`` if data file is too big to fit in memory
  // desc = by default, LightGBM will map data file to memory and load features from memory. This will provide faster data loading speed, but may cause out-of-memory errors when the data file is very big
  // desc = **Note**: works only in case of loading data directly from text file
  bool two_round = false;

  // alias = has_header
  // desc = set this to ``true`` if input data has a header
  // desc = **Note**: works only in case of loading data directly from text file
  bool header = false;

  // type = int or string
  // alias = label
  // desc = used to specify the label column
  // desc = use number for index, e.g. ``label=0`` means column\_0 is the label
  // desc = add a prefix ``name:`` for column name, e.g. ``label=name:is_click``
  // desc = if omitted, the first column in the training data is used as the label
  // desc = **Note**: works only in case of loading data directly from text file
  std::string label_column = "";

  // type = int or string
  // alias = weight
  // desc = used to specify the weight column
  // desc = use number for index, e.g. ``weight=0`` means column\_0 is the weight
  // desc = add a prefix ``name:`` for column name, e.g. ``weight=name:weight``
  // desc = **Note**: works only in case of loading data directly from text file
  // desc = **Note**: index starts from ``0`` and it doesn't count the label column when the passed type is ``int``, e.g. when label is column\_0, and weight is column\_1, the correct parameter is ``weight=0``
  // desc = **Note**: weights should be non-negative
  std::string weight_column = "";

  // type = int or string
  // alias = group, group_id, query_column, query, query_id
  // desc = used to specify the query/group id column
  // desc = use number for index, e.g. ``query=0`` means column\_0 is the query id
  // desc = add a prefix ``name:`` for column name, e.g. ``query=name:query_id``
  // desc = **Note**: works only in case of loading data directly from text file
  // desc = **Note**: data should be grouped by query\_id, for more information, see `Query Data <#query-data>`__
  // desc = **Note**: index starts from ``0`` and it doesn't count the label column when the passed type is ``int``, e.g. when label is column\_0 and query\_id is column\_1, the correct parameter is ``query=0``
  std::string group_column = "";

  // type = multi-int or string
  // alias = ignore_feature, blacklist
  // desc = used to specify columns to ignore in training
  // desc = use number for index, e.g. ``ignore_column=0,1,2`` means column\_0, column\_1 and column\_2 will be ignored
  // desc = add a prefix ``name:`` for column name, e.g. ``ignore_column=name:c1,c2,c3`` means c1, c2 and c3 will be ignored
  // desc = **Note**: works only in case of loading data directly from text file
  // desc = **Note**: index starts from ``0`` and it doesn't count the label column when the passed type is ``int``
  // desc = **Note**: despite the fact that specified columns will be completely ignored during the training, they should still have a valid format allowing LightGBM to load the file successfully
  std::string ignore_column = "";

  // type = multi-int or string
  // alias = cat_feature, categorical_column, cat_column, categorical_features
  // desc = used to specify categorical features
  // desc = use number for index, e.g. ``categorical_feature=0,1,2`` means column\_0, column\_1 and column\_2 are categorical features
  // desc = add a prefix ``name:`` for column name, e.g. ``categorical_feature=name:c1,c2,c3`` means c1, c2 and c3 are categorical features
  // desc = **Note**: all values will be cast to ``int32`` (integer codes will be extracted from pandas categoricals in the Python-package)
  // desc = **Note**: index starts from ``0`` and it doesn't count the label column when the passed type is ``int``
  // desc = **Note**: all values should be less than ``Int32.MaxValue`` (2147483647)
  // desc = **Note**: using large values could be memory consuming. The tree decision rule works best when categorical features are represented by consecutive integers starting from zero
  // desc = **Note**: all negative values will be treated as **missing values**
  // desc = **Note**: the output cannot be monotonically constrained with respect to a categorical feature
  // desc = **Note**: floating point numbers in categorical features will be rounded towards 0
  std::string categorical_feature = "";

  // desc = path to a ``.json`` file that specifies bin upper bounds for some or all features
  // desc = ``.json`` file should contain an array of objects, each containing the word ``feature`` (integer feature index) and ``bin_upper_bound`` (array of thresholds for binning)
  // desc = see `this file <https://github.com/microsoft/LightGBM/tree/master/examples/regression/forced_bins.json>`__ as an example
  std::string forcedbins_filename = "";

  // [no-save]
  // alias = is_save_binary, is_save_binary_file
  // desc = if ``true``, LightGBM will save the dataset (including validation data) to a binary file. This speeds up data loading for the next time
  // desc = **Note**: ``init_score`` is not saved in binary file
  // desc = **Note**: can be used only in CLI version; for language-specific packages you can use the correspondent function
  bool save_binary = false;

  // desc = use precise floating point number parsing for text parser (e.g. CSV, TSV, LibSVM input)
  // desc = **Note**: setting this to ``true`` may lead to much slower text parsing
  bool precise_float_parser = false;

  // desc = path to a ``.json`` file that specifies customized parser initialized configuration
  // desc = see `lightgbm-transform <https://github.com/microsoft/lightgbm-transform>`__ for usage examples
  // desc = **Note**: ``lightgbm-transform`` is not maintained by LightGBM's maintainers. Bug reports or feature requests should go to `issues page <https://github.com/microsoft/lightgbm-transform/issues>`__
  // desc = *New in 4.0.0*
  std::string parser_config_file = "";

  #ifndef __NVCC__
  #pragma endregion

  #pragma region Predict Parameters
  #endif  // __NVCC__

  // [no-save]
  // desc = used only in ``prediction`` task
  // desc = used to specify from which iteration to start the prediction
  // desc = ``<= 0`` means from the first iteration
  int start_iteration_predict = 0;

  // [no-save]
  // desc = used only in ``prediction`` task
  // desc = used to specify how many trained iterations will be used in prediction
  // desc = ``<= 0`` means no limit
  int num_iteration_predict = -1;

  // [no-save]
  // alias = is_predict_raw_score, predict_rawscore, raw_score
  // desc = used only in ``prediction`` task
  // desc = set this to ``true`` to predict only the raw scores
  // desc = set this to ``false`` to predict transformed scores
  bool predict_raw_score = false;

  // [no-save]
  // alias = is_predict_leaf_index, leaf_index
  // desc = used only in ``prediction`` task
  // desc = set this to ``true`` to predict with leaf index of all trees
  bool predict_leaf_index = false;

  // [no-save]
  // alias = is_predict_contrib, contrib
  // desc = used only in ``prediction`` task
  // desc = set this to ``true`` to estimate `SHAP values <https://arxiv.org/abs/1706.06060>`__, which represent how each feature contributes to each prediction
  // desc = produces ``#features + 1`` values where the last value is the expected value of the model output over the training data
  // desc = **Note**: if you want to get more explanation for your model's predictions using SHAP values like SHAP interaction values, you can install `shap package <https://github.com/slundberg/shap>`__
  // desc = **Note**: unlike the shap package, with ``predict_contrib`` we return a matrix with an extra column, where the last column is the expected value
  // desc = **Note**: this feature is not implemented for linear trees
  bool predict_contrib = false;

  // [no-save]
  // desc = used only in ``prediction`` task
  // desc = control whether or not LightGBM raises an error when you try to predict on data with a different number of features than the training data
  // desc = if ``false`` (the default), a fatal error will be raised if the number of features in the dataset you predict on differs from the number seen during training
  // desc = if ``true``, LightGBM will attempt to predict on whatever data you provide. This is dangerous because you might get incorrect predictions, but you could use it in situations where it is difficult or expensive to generate some features and you are very confident that they were never chosen for splits in the model
  // desc = **Note**: be very careful setting this parameter to ``true``
  bool predict_disable_shape_check = false;

  // [no-save]
  // desc = used only in ``prediction`` task
  // desc = used only in ``classification`` and ``ranking`` applications
  // desc = used only for predicting normal or raw scores
  // desc = if ``true``, will use early-stopping to speed up the prediction. May affect the accuracy
  // desc = **Note**: cannot be used with ``rf`` boosting type or custom objective function
  bool pred_early_stop = false;

  // [no-save]
  // desc = used only in ``prediction`` task
  // desc = the frequency of checking early-stopping prediction
  int pred_early_stop_freq = 10;

  // [no-save]
  // desc = used only in ``prediction`` task
  // desc = the threshold of margin in early-stopping prediction
  double pred_early_stop_margin = 10.0;

  // [no-save]
  // alias = predict_result, prediction_result, predict_name, prediction_name, pred_name, name_pred
  // desc = used only in ``prediction`` task
  // desc = filename of prediction result
  // desc = **Note**: can be used only in CLI version
  std::string output_result = "LightGBM_predict_result.txt";

  #ifndef __NVCC__
  #pragma endregion

  #pragma region Convert Parameters
  #endif  // __NVCC__

  // [no-save]
  // desc = used only in ``convert_model`` task
  // desc = only ``cpp`` is supported so far; to convert the model to other languages, consider using the `m2cgen <https://github.com/BayesWitnesses/m2cgen>`__ utility
  // desc = if ``convert_model_language`` is set and ``task=train``, the model will also be converted
  // desc = **Note**: can be used only in CLI version
  std::string convert_model_language = "";

  // [no-save]
  // alias = convert_model_file
  // desc = used only in ``convert_model`` task
  // desc = output filename of converted model
  // desc = **Note**: can be used only in CLI version
  std::string convert_model = "gbdt_prediction.cpp";

  #ifndef __NVCC__
  #pragma endregion

885
886
  #pragma endregion

Guolin Ke's avatar
Guolin Ke committed
887
  #pragma region Objective Parameters
888
  #endif  // __NVCC__

  // desc = used only in ``rank_xendcg`` objective
  // desc = random seed for objectives, if random process is needed
  int objective_seed = 5;

  // check = >0
  // alias = num_classes
  // desc = used only in ``multi-class`` classification application
  int num_class = 1;

  // alias = unbalance, unbalanced_sets
  // desc = used only in ``binary`` and ``multiclassova`` applications
  // desc = set this to ``true`` if training data are unbalanced
  // desc = **Note**: while enabling this should increase the overall performance metric of your model, it will also result in poor estimates of the individual class probabilities
  // desc = **Note**: this parameter cannot be used at the same time with ``scale_pos_weight``, choose only **one** of them
  bool is_unbalance = false;

  // check = >0.0
  // desc = used only in ``binary`` and ``multiclassova`` applications
  // desc = weight of labels with positive class
  // desc = **Note**: while enabling this should increase the overall performance metric of your model, it will also result in poor estimates of the individual class probabilities
  // desc = **Note**: this parameter cannot be used at the same time with ``is_unbalance``, choose only **one** of them
  double scale_pos_weight = 1.0;

  // check = >0.0
  // desc = used only in ``binary`` and ``multiclassova`` classification and in ``lambdarank`` applications
  // desc = parameter for the sigmoid function
  double sigmoid = 1.0;
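
  // Illustrative formula (a sketch of the usual parameterization; the exact
  // usage lives in the objective implementations):
  //   double p = 1.0 / (1.0 + std::exp(-sigmoid * raw_score));
  // larger values of ``sigmoid`` make the probability transition sharper.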

  // desc = used only in ``regression``, ``binary``, ``multiclassova`` and ``cross-entropy`` applications
  // desc = adjusts initial score to the mean of labels for faster convergence
  bool boost_from_average = true;

  // desc = used only in ``regression`` application
  // desc = used to fit ``sqrt(label)`` instead of original values; the prediction result will also be automatically converted to ``prediction^2``
  // desc = might be useful in case of large-range labels
  bool reg_sqrt = false;
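
  // Worked example (informal): with ``reg_sqrt=true`` a label of 10000 is
  // fitted as sqrt(10000) = 100, and a raw prediction of 99 is reported back
  // as 99^2 = 9801, keeping the model's internal targets in a narrow range.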

  // check = >0.0
  // desc = used only in ``huber`` and ``quantile`` ``regression`` applications
  // desc = parameter for `Huber loss <https://en.wikipedia.org/wiki/Huber_loss>`__ and `Quantile regression <https://en.wikipedia.org/wiki/Quantile_regression>`__
  double alpha = 0.9;
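
  // Informal note: for the ``quantile`` objective, ``alpha`` is the target
  // quantile level (e.g. ``alpha = 0.9`` fits the 90th percentile); for
  // ``huber`` it acts as the delta-like threshold of the Huber loss.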

  // check = >0.0
  // desc = used only in ``fair`` ``regression`` application
  // desc = parameter for `Fair loss <https://www.kaggle.com/c/allstate-claims-severity/discussion/24520>`__
  double fair_c = 1.0;

  // check = >0.0
  // desc = used only in ``poisson`` ``regression`` application
  // desc = parameter for `Poisson regression <https://en.wikipedia.org/wiki/Poisson_regression>`__ to safeguard optimization
  double poisson_max_delta_step = 0.7;

  // check = >=1.0
  // check = <2.0
  // desc = used only in ``tweedie`` ``regression`` application
  // desc = used to control the variance of the tweedie distribution
  // desc = set this closer to ``2`` to shift towards a **Gamma** distribution
  // desc = set this closer to ``1`` to shift towards a **Poisson** distribution
  double tweedie_variance_power = 1.5;
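
  // Background (standard Tweedie fact, not LightGBM-specific): the family
  // satisfies Var(Y) = phi * mu^p, where ``p`` is this parameter; ``p = 1``
  // corresponds to Poisson and ``p = 2`` to Gamma.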

  // check = >0
  // desc = used only in ``lambdarank`` application
  // desc = controls the number of top results to focus on during training, refer to "truncation level" in Sec. 3 of the `LambdaMART paper <https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/MSR-TR-2010-82.pdf>`__
  // desc = this parameter is closely related to the desirable cutoff ``k`` in the metric **NDCG@k** that we aim to optimize the ranker for. The optimal setting for this parameter is likely to be slightly higher than ``k`` (e.g., ``k + 3``) to include more pairs of documents to train on, but perhaps not too high to avoid deviating too much from the desired target metric **NDCG@k**
  int lambdarank_truncation_level = 30;

  // desc = used only in ``lambdarank`` application
  // desc = set this to ``true`` to normalize the lambdas for different queries, and improve the performance for unbalanced data
  // desc = set this to ``false`` to enforce the original lambdarank algorithm
  bool lambdarank_norm = true;

  // type = multi-double
  // default = 0,1,3,7,15,31,63,...,2^30-1
  // desc = used only in ``lambdarank`` application
  // desc = relevant gain for labels. For example, the gain of label ``2`` is ``3`` in case of default label gains
  // desc = separate by ``,``
  std::vector<double> label_gain;
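
  // Sketch of the documented default above (illustrative; the actual
  // initialization happens when parameters are resolved):
  //   std::vector<double> default_label_gain;
  //   for (int i = 0; i < 31; ++i) {
  //     default_label_gain.push_back((1u << i) - 1.0);  // 2^i - 1: 0, 1, 3, 7, ...
  //   }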

  // check = >=0.0
  // desc = used only in ``lambdarank`` application when positional information is provided and position bias is modeled. Larger values reduce the inferred position bias factors.
  // desc = *New in version 4.1.0*
  double lambdarank_position_bias_regularization = 0.0;

  #ifndef __NVCC__
  #pragma endregion

  #pragma region Metric Parameters
  #endif  // __NVCC__

  // [doc-only]
  // alias = metrics, metric_types
  // default = ""
  // type = multi-enum
  // desc = metric(s) to be evaluated on the evaluation set(s)
  // descl2 = ``""`` (empty string or not specified) means that metric corresponding to specified ``objective`` will be used (this is possible only for pre-defined objective functions, otherwise no evaluation metric will be added)
  // descl2 = ``"None"`` (string, **not** a ``None`` value) means that no metric will be registered, aliases: ``na``, ``null``, ``custom``
  // descl2 = ``l1``, absolute loss, aliases: ``mean_absolute_error``, ``mae``, ``regression_l1``
  // descl2 = ``l2``, square loss, aliases: ``mean_squared_error``, ``mse``, ``regression_l2``, ``regression``
  // descl2 = ``rmse``, root mean square loss, aliases: ``root_mean_squared_error``, ``l2_root``
  // descl2 = ``quantile``, `Quantile regression <https://en.wikipedia.org/wiki/Quantile_regression>`__
  // descl2 = ``mape``, `MAPE loss <https://en.wikipedia.org/wiki/Mean_absolute_percentage_error>`__, aliases: ``mean_absolute_percentage_error``
  // descl2 = ``huber``, `Huber loss <https://en.wikipedia.org/wiki/Huber_loss>`__
  // descl2 = ``fair``, `Fair loss <https://www.kaggle.com/c/allstate-claims-severity/discussion/24520>`__
  // descl2 = ``poisson``, negative log-likelihood for `Poisson regression <https://en.wikipedia.org/wiki/Poisson_regression>`__
  // descl2 = ``gamma``, negative log-likelihood for **Gamma** regression
  // descl2 = ``gamma_deviance``, residual deviance for **Gamma** regression
  // descl2 = ``tweedie``, negative log-likelihood for **Tweedie** regression
  // descl2 = ``ndcg``, `NDCG <https://en.wikipedia.org/wiki/Discounted_cumulative_gain#Normalized_DCG>`__, aliases: ``lambdarank``, ``rank_xendcg``, ``xendcg``, ``xe_ndcg``, ``xe_ndcg_mart``, ``xendcg_mart``
  // descl2 = ``map``, `MAP <https://makarandtapaswi.wordpress.com/2012/07/02/intuition-behind-average-precision-and-map/>`__, aliases: ``mean_average_precision``
  // descl2 = ``auc``, `AUC <https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve>`__
  // descl2 = ``average_precision``, `average precision score <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.average_precision_score.html>`__
  // descl2 = ``binary_logloss``, `log loss <https://en.wikipedia.org/wiki/Cross_entropy>`__, aliases: ``binary``
  // descl2 = ``binary_error``, for one sample: ``0`` for correct classification, ``1`` for incorrect classification
  // descl2 = ``auc_mu``, `AUC-mu <http://proceedings.mlr.press/v97/kleiman19a/kleiman19a.pdf>`__
  // descl2 = ``multi_logloss``, log loss for multi-class classification, aliases: ``multiclass``, ``softmax``, ``multiclassova``, ``multiclass_ova``, ``ova``, ``ovr``
  // descl2 = ``multi_error``, error rate for multi-class classification
  // descl2 = ``cross_entropy``, cross-entropy (with optional linear weights), aliases: ``xentropy``
  // descl2 = ``cross_entropy_lambda``, "intensity-weighted" cross-entropy, aliases: ``xentlambda``
  // descl2 = ``kullback_leibler``, `Kullback-Leibler divergence <https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence>`__, aliases: ``kldiv``
  // desc = supports multiple metrics, separated by ``,``
  std::vector<std::string> metric;

  // [no-save]
  // check = >0
  // alias = output_freq
  // desc = frequency for metric output
  // desc = **Note**: can be used only in CLI version
  int metric_freq = 1;

  // [no-save]
  // alias = training_metric, is_training_metric, train_metric
  // desc = set this to ``true`` to output metric result over training dataset
  // desc = **Note**: can be used only in CLI version
  bool is_provide_training_metric = false;

  // type = multi-int
  // default = 1,2,3,4,5
  // alias = ndcg_eval_at, ndcg_at, map_eval_at, map_at
  // desc = used only with ``ndcg`` and ``map`` metrics
  // desc = `NDCG <https://en.wikipedia.org/wiki/Discounted_cumulative_gain#Normalized_DCG>`__ and `MAP <https://makarandtapaswi.wordpress.com/2012/07/02/intuition-behind-average-precision-and-map/>`__ evaluation positions, separated by ``,``
  std::vector<int> eval_at;

  // check = >0
  // desc = used only with ``multi_error`` metric
  // desc = threshold for top-k multi-error metric
  // desc = the error on each sample is ``0`` if the true class is among the top ``multi_error_top_k`` predictions, and ``1`` otherwise
  // descl2 = more precisely, the error on a sample is ``0`` if there are at least ``num_classes - multi_error_top_k`` predictions strictly less than the prediction on the true class
  // desc = when ``multi_error_top_k=1`` this is equivalent to the usual multi-error metric
  int multi_error_top_k = 1;
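
  // Worked example (informal): with 4 classes, scores {0.1, 0.3, 0.4, 0.2}
  // and true class ``1`` (score 0.3), ``multi_error_top_k=2`` yields an error
  // of ``0`` (0.3 is the second-highest score), while ``multi_error_top_k=1``
  // yields ``1`` (class ``2`` has the single highest score).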

  // type = multi-double
  // default = None
  // desc = used only with ``auc_mu`` metric
  // desc = list representing flattened matrix (in row-major order) giving loss weights for classification errors
  // desc = list should have ``n * n`` elements, where ``n`` is the number of classes
  // desc = the matrix co-ordinate ``[i, j]`` should correspond to the ``i * n + j``-th element of the list
  // desc = if not specified, will use equal weights for all classes
  std::vector<double> auc_mu_weights;
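
  // Worked example (informal): for ``n = 3`` classes,
  //   auc_mu_weights = 0,1,1,1,0,1,1,1,0
  // encodes a 3x3 matrix whose entry ``[i, j]`` sits at index ``i * 3 + j``,
  // so ``[1, 2]`` is element ``1 * 3 + 2 = 5`` of the list.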

  #ifndef __NVCC__
  #pragma endregion

  #pragma region Network Parameters
  #endif  // __NVCC__

  // check = >0
  // alias = num_machine
  // desc = the number of machines for distributed learning application
  // desc = this parameter needs to be set in both **socket** and **mpi** versions
  int num_machines = 1;

  // check = >0
  // default = 12400 (random for Dask-package)
  // alias = local_port, port
  // desc = TCP listen port for local machines
  // desc = **Note**: don't forget to allow this port in firewall settings before training
  int local_listen_port = 12400;

  // check = >0
  // desc = socket time-out in minutes
  int time_out = 120;

  // alias = machine_list_file, machine_list, mlist
  // desc = path of file that lists machines for this distributed learning application
  // desc = each line contains one IP and one port for one machine. The format is ``ip port`` (space as a separator)
  // desc = **Note**: can be used only in CLI version
  std::string machine_list_filename = "";

  // alias = workers, nodes
  // desc = list of machines in the following format: ``ip1:port1,ip2:port2``
  std::string machines = "";
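
  // Illustrative example (hypothetical addresses): a two-machine cluster
  // could be configured as
  //   machines = 192.168.0.1:12400,192.168.0.2:12400
  // together with ``num_machines = 2`` on every node.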

  #ifndef __NVCC__
  #pragma endregion

  #pragma region GPU Parameters
  #endif  // __NVCC__

  // desc = OpenCL platform ID. Usually each GPU vendor exposes one OpenCL platform
  // desc = ``-1`` means the system-wide default platform
  // desc = **Note**: refer to `GPU Targets <./GPU-Targets.rst#query-opencl-devices-in-your-system>`__ for more details
  int gpu_platform_id = -1;

  // desc = OpenCL device ID in the specified platform. Each GPU in the selected platform has a unique device ID
  // desc = ``-1`` means the default device in the selected platform
  // desc = **Note**: refer to `GPU Targets <./GPU-Targets.rst#query-opencl-devices-in-your-system>`__ for more details
  int gpu_device_id = -1;

  // desc = set this to ``true`` to use double precision math on GPU (by default single precision is used)
  // desc = **Note**: can be used only in OpenCL implementation; in the CUDA implementation only double precision is currently supported
  bool gpu_use_dp = false;

  // check = >0
  // desc = number of GPUs
  // desc = **Note**: can be used only in CUDA implementation
  int num_gpu = 1;

  #ifndef __NVCC__
  #pragma endregion

  #pragma endregion
  #endif  // __NVCC__

  // interval, in bytes, between progress-log messages while loading data from file (10 GiB)
  size_t file_load_progress_interval_bytes = size_t(10) * 1024 * 1024 * 1024;

  // flags derived from other parameters (e.g. the tree learner type); set internally, not supplied by users
  bool is_parallel = false;
  bool is_data_based_parallel = false;
  LIGHTGBM_EXPORT void Set(const std::unordered_map<std::string, std::string>& params);
  static const std::unordered_map<std::string, std::string>& alias_table();
  static const std::unordered_map<std::string, std::vector<std::string>>& parameter2aliases();
  static const std::unordered_set<std::string>& parameter_set();
  std::vector<std::vector<double>> auc_mu_weights_matrix;
  std::vector<std::vector<int>> interaction_constraints_vector;
  static const std::unordered_map<std::string, std::string>& ParameterTypes();
  static const std::string DumpAliases();

 private:
  void CheckParamConflict();
  void GetMembersFromString(const std::unordered_map<std::string, std::string>& params);
  std::string SaveMembersToString() const;
  void GetAucMuWeights();
  void GetInteractionConstraints();
};

inline bool Config::GetString(
  const std::unordered_map<std::string, std::string>& params,
  const std::string& name, std::string* out) {
  if (params.count(name) > 0 && !params.at(name).empty()) {
    *out = params.at(name);
    return true;
  }
  return false;
}

inline bool Config::GetInt(
  const std::unordered_map<std::string, std::string>& params,
  const std::string& name, int* out) {
  if (params.count(name) > 0 && !params.at(name).empty()) {
    if (!Common::AtoiAndCheck(params.at(name).c_str(), out)) {
      Log::Fatal("Parameter %s should be of type int, got \"%s\"",
                 name.c_str(), params.at(name).c_str());
    }
    return true;
  }
  return false;
}

inline bool Config::GetDouble(
  const std::unordered_map<std::string, std::string>& params,
  const std::string& name, double* out) {
  if (params.count(name) > 0 && !params.at(name).empty()) {
    if (!Common::AtofAndCheck(params.at(name).c_str(), out)) {
      Log::Fatal("Parameter %s should be of type double, got \"%s\"",
                 name.c_str(), params.at(name).c_str());
    }
    return true;
  }
  return false;
}

inline bool Config::GetBool(
  const std::unordered_map<std::string, std::string>& params,
  const std::string& name, bool* out) {
  if (params.count(name) > 0 && !params.at(name).empty()) {
    std::string value = params.at(name);
    std::transform(value.begin(), value.end(), value.begin(), Common::tolower);
    if (value == std::string("false") || value == std::string("-")) {
      *out = false;
    } else if (value == std::string("true") || value == std::string("+")) {
      *out = true;
    } else {
      Log::Fatal("Parameter %s should be \"true\"/\"+\" or \"false\"/\"-\", got \"%s\"",
                 name.c_str(), params.at(name).c_str());
    }
    return true;
  }
  return false;
}
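
// Usage sketch (illustrative): reading typed values out of a parsed key-value
// map with the getters above; a malformed value triggers Log::Fatal.
//   std::unordered_map<std::string, std::string> params{
//       {"num_class", "3"}, {"is_unbalance", "true"}};
//   int num_class = 1;
//   bool is_unbalance = false;
//   Config::GetInt(params, "num_class", &num_class);        // num_class == 3
//   Config::GetBool(params, "is_unbalance", &is_unbalance); // is_unbalance == true
//   Config::GetInt(params, "not_set", &num_class);          // returns false, leaves num_class unchanged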

// returns true if alias ``x`` takes precedence over ``y``: shorter names win, ties break alphabetically
inline bool Config::SortAlias(const std::string& x, const std::string& y) {
  return x.size() < y.size() || (x.size() == y.size() && x < y);
}

struct ParameterAlias {
  // replaces alias keys in ``params`` with their canonical parameter names,
  // warning about conflicting duplicates and unknown parameters
  static void KeyAliasTransform(std::unordered_map<std::string, std::string>* params) {
    std::unordered_map<std::string, std::string> tmp_map;
    for (const auto& pair : *params) {
      auto alias = Config::alias_table().find(pair.first);
      if (alias != Config::alias_table().end()) {  // found alias
        auto alias_set = tmp_map.find(alias->second);
        if (alias_set != tmp_map.end()) {  // alias already set
          if (Config::SortAlias(alias_set->second, pair.first)) {
            Log::Warning("%s is set with %s=%s, %s=%s will be ignored. Current value: %s=%s",
                         alias->second.c_str(), alias_set->second.c_str(), params->at(alias_set->second).c_str(),
                         pair.first.c_str(), pair.second.c_str(), alias->second.c_str(), params->at(alias_set->second).c_str());
          } else {
            Log::Warning("%s is set with %s=%s, will be overridden by %s=%s. Current value: %s=%s",
                         alias->second.c_str(), alias_set->second.c_str(), params->at(alias_set->second).c_str(),
                         pair.first.c_str(), pair.second.c_str(), alias->second.c_str(), pair.second.c_str());
            tmp_map[alias->second] = pair.first;
          }
        } else {  // alias not set
          tmp_map.emplace(alias->second, pair.first);
        }
      } else if (Config::parameter_set().find(pair.first) == Config::parameter_set().end()) {
        Log::Warning("Unknown parameter: %s", pair.first.c_str());
      }
    }
    for (const auto& pair : tmp_map) {
      auto alias = params->find(pair.first);
      if (alias == params->end()) {  // canonical name not set yet
        params->emplace(pair.first, params->at(pair.second));
        params->erase(pair.second);
      } else {
        Log::Warning("%s is set=%s, %s=%s will be ignored. Current value: %s=%s",
                     pair.first.c_str(), alias->second.c_str(), pair.second.c_str(), params->at(pair.second).c_str(),
                     pair.first.c_str(), alias->second.c_str());
      }
    }
  }
};
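
// Usage sketch (illustrative; assumes "num_iteration" is registered in
// Config::alias_table() as an alias of "num_iterations"):
//   std::unordered_map<std::string, std::string> params{{"num_iteration", "100"}};
//   ParameterAlias::KeyAliasTransform(&params);
//   // params now holds the canonical key: {"num_iterations", "100"}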

inline std::string ParseObjectiveAlias(const std::string& type) {
  if (type == std::string("regression") || type == std::string("regression_l2")
    || type == std::string("mean_squared_error") || type == std::string("mse") || type == std::string("l2")
    || type == std::string("l2_root") || type == std::string("root_mean_squared_error") || type == std::string("rmse")) {
    return "regression";
  } else if (type == std::string("regression_l1") || type == std::string("mean_absolute_error")
    || type == std::string("l1") || type == std::string("mae")) {
    return "regression_l1";
  } else if (type == std::string("multiclass") || type == std::string("softmax")) {
    return "multiclass";
  } else if (type == std::string("multiclassova") || type == std::string("multiclass_ova") || type == std::string("ova") || type == std::string("ovr")) {
    return "multiclassova";
  } else if (type == std::string("xentropy") || type == std::string("cross_entropy")) {
    return "cross_entropy";
  } else if (type == std::string("xentlambda") || type == std::string("cross_entropy_lambda")) {
    return "cross_entropy_lambda";
  } else if (type == std::string("mean_absolute_percentage_error") || type == std::string("mape")) {
    return "mape";
  } else if (type == std::string("rank_xendcg") || type == std::string("xendcg") || type == std::string("xe_ndcg")
             || type == std::string("xe_ndcg_mart") || type == std::string("xendcg_mart")) {
    return "rank_xendcg";
  } else if (type == std::string("none") || type == std::string("null") || type == std::string("custom") || type == std::string("na")) {
    return "custom";
  }
  return type;
}
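
// Illustrative behavior of the mapping above: ParseObjectiveAlias("mse")
// returns "regression", ParseObjectiveAlias("mae") returns "regression_l1",
// and a name with no registered alias (e.g. "binary") is returned unchanged.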

inline std::string ParseMetricAlias(const std::string& type) {
  if (type == std::string("regression") || type == std::string("regression_l2") || type == std::string("l2") || type == std::string("mean_squared_error") || type == std::string("mse")) {
    return "l2";
  } else if (type == std::string("l2_root") || type == std::string("root_mean_squared_error") || type == std::string("rmse")) {
    return "rmse";
  } else if (type == std::string("regression_l1") || type == std::string("l1") || type == std::string("mean_absolute_error") || type == std::string("mae")) {
    return "l1";
  } else if (type == std::string("binary_logloss") || type == std::string("binary")) {
    return "binary_logloss";
  } else if (type == std::string("ndcg") || type == std::string("lambdarank") || type == std::string("rank_xendcg")
             || type == std::string("xendcg") || type == std::string("xe_ndcg") || type == std::string("xe_ndcg_mart") || type == std::string("xendcg_mart")) {
    return "ndcg";
  } else if (type == std::string("map") || type == std::string("mean_average_precision")) {
    return "map";
  } else if (type == std::string("multi_logloss") || type == std::string("multiclass") || type == std::string("softmax") || type == std::string("multiclassova") || type == std::string("multiclass_ova") || type == std::string("ova") || type == std::string("ovr")) {
    return "multi_logloss";
  } else if (type == std::string("xentropy") || type == std::string("cross_entropy")) {
    return "cross_entropy";
  } else if (type == std::string("xentlambda") || type == std::string("cross_entropy_lambda")) {
    return "cross_entropy_lambda";
  } else if (type == std::string("kldiv") || type == std::string("kullback_leibler")) {
    return "kullback_leibler";
  } else if (type == std::string("mean_absolute_percentage_error") || type == std::string("mape")) {
    return "mape";
  } else if (type == std::string("none") || type == std::string("null") || type == std::string("custom") || type == std::string("na")) {
    return "custom";
  }
  return type;
}
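
// Illustrative behavior of the mapping above: ParseMetricAlias("mean_squared_error")
// returns "l2", ParseMetricAlias("lambdarank") returns "ndcg", and unrecognized
// names pass through unchanged, which preserves custom metric identifiers.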

}   // namespace LightGBM

#endif   // LIGHTGBM_CONFIG_H_