/*!
 * Copyright (c) 2016 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for license information.
 *
 * \note
 * - desc and descl2 fields must be written in reStructuredText format;
 * - nested sections can be placed only at the bottom of parent's section;
 * - [no-automatically-extract]
 *       - do not automatically extract this parameter into a Config property with the same name in Config::GetMembersFromString(). Use if:
 *           - specialized extraction logic for this param exists in Config::GetMembersFromString()
 * - [no-save]
 *       - this param should not be saved into a model text representation via Config::SaveMembersToString(). Use if:
 *           - param is only used by the CLI (especially the "predict" and "convert_model" tasks)
 *           - param is related to LightGBM writing files (e.g. "output_model", "save_binary")
 */
#ifndef LIGHTGBM_CONFIG_H_
#define LIGHTGBM_CONFIG_H_

#include <LightGBM/export.h>
#include <LightGBM/meta.h>
#include <LightGBM/utils/common.h>
#include <LightGBM/utils/log.h>

#include <string>
#include <algorithm>
#include <memory>
#include <unordered_map>
#include <unordered_set>
#include <vector>

namespace LightGBM {

/*! \brief Types of tasks */
enum TaskType {
  kTrain, kPredict, kConvertModel, KRefitTree, kSaveBinary
};
const int kDefaultNumLeaves = 31;

struct Config {
 public:
  Config() {}
  explicit Config(std::unordered_map<std::string, std::string> parameters_map) {
    Set(parameters_map);
  }
  std::string ToString() const;
  /*!
  * \brief Get string value for a specific key
  * \param params Map storing the parameter keys and values
  * \param name Name of the key
  * \param out Value will be assigned to out if the key exists
  * \return True if the key exists
  */
  inline static bool GetString(
    const std::unordered_map<std::string, std::string>& params,
    const std::string& name, std::string* out);

  /*!
  * \brief Get int value for a specific key
  * \param params Map storing the parameter keys and values
  * \param name Name of the key
  * \param out Value will be assigned to out if the key exists
  * \return True if the key exists
  */
  inline static bool GetInt(
    const std::unordered_map<std::string, std::string>& params,
    const std::string& name, int* out);

  /*!
  * \brief Get double value for a specific key
  * \param params Map storing the parameter keys and values
  * \param name Name of the key
  * \param out Value will be assigned to out if the key exists
  * \return True if the key exists
  */
  inline static bool GetDouble(
    const std::unordered_map<std::string, std::string>& params,
    const std::string& name, double* out);

  /*!
  * \brief Get bool value for a specific key
  * \param params Map storing the parameter keys and values
  * \param name Name of the key
  * \param out Value will be assigned to out if the key exists
  * \return True if the key exists
  */
  inline static bool GetBool(
    const std::unordered_map<std::string, std::string>& params,
    const std::string& name, bool* out);
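  // Minimal illustrative sketch of how the typed getters above are used (the parameter
  // map below is a made-up example): per the docs above, each getter assigns to *out
  // only when the key exists and returns whether it was found, so callers keep their
  // defaults otherwise.
  //
  //   std::unordered_map<std::string, std::string> params = {{"learning_rate", "0.05"}};
  //   double lr = 0.1;                                  // default value
  //   Config::GetDouble(params, "learning_rate", &lr);  // returns true, lr becomes 0.05
  //   int leaves = kDefaultNumLeaves;
  //   Config::GetInt(params, "num_leaves", &leaves);    // returns false, leaves unchanged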

  /*!
  * \brief Sort aliases by length and then alphabetically
  * \param x Alias 1
  * \param y Alias 2
  * \return true if x has higher priority than y
  */
  inline static bool SortAlias(const std::string& x, const std::string& y);
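  // Illustrative only (based on the brief above, not a definitive contract): shorter
  // aliases are expected to take priority, e.g. SortAlias("data", "train_data") should
  // be true; ties in length are assumed to be broken alphabetically.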

  static void KeepFirstValues(const std::unordered_map<std::string, std::vector<std::string>>& params, std::unordered_map<std::string, std::string>* out);
  static void KV2Map(std::unordered_map<std::string, std::vector<std::string>>* params, const char* kv);
  static void SetVerbosity(const std::unordered_map<std::string, std::vector<std::string>>& params);
  static std::unordered_map<std::string, std::string> Str2Map(const char* parameters);
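  // Minimal end-to-end sketch (the space-separated ``key=value`` string is an assumed
  // input format for Str2Map, matching how the CLI passes parameters):
  //
  //   auto param_map = Config::Str2Map("num_leaves=63 learning_rate=0.05 objective=binary");
  //   Config cfg(param_map);  // the explicit constructor above forwards to Set()
  //   // cfg.num_leaves == 63, cfg.learning_rate == 0.05, cfg.objective == "binary"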

  #ifndef __NVCC__
  #pragma region Parameters

  #pragma region Core Parameters
  #endif  // __NVCC__

  // [no-automatically-extract]
  // [no-save]
  // alias = config_file
  // desc = path of config file
  // desc = **Note**: can be used only in CLI version
  std::string config = "";

  // [no-automatically-extract]
  // [no-save]
  // type = enum
  // default = train
  // options = train, predict, convert_model, refit, save_binary
  // alias = task_type
  // desc = ``train``, for training, aliases: ``training``
  // desc = ``predict``, for prediction, aliases: ``prediction``, ``test``
  // desc = ``convert_model``, for converting model file into if-else format, see more information in `Convert Parameters <#convert-parameters>`__
  // desc = ``refit``, for refitting existing models with new data, aliases: ``refit_tree``
  // desc = ``save_binary``, load train (and validation) data then save dataset to binary file. Typical usage: ``save_binary`` first, then run multiple ``train`` tasks in parallel using the saved binary file
  // desc = **Note**: can be used only in CLI version; for language-specific packages you can use the correspondent functions
  TaskType task = TaskType::kTrain;

  // [no-automatically-extract]
  // [no-save]
  // type = enum
  // options = regression, regression_l1, huber, fair, poisson, quantile, mape, gamma, tweedie, binary, multiclass, multiclassova, cross_entropy, cross_entropy_lambda, lambdarank, rank_xendcg
  // alias = objective_type, app, application, loss
  // desc = regression application
  // descl2 = ``regression``, L2 loss, aliases: ``regression_l2``, ``l2``, ``mean_squared_error``, ``mse``, ``l2_root``, ``root_mean_squared_error``, ``rmse``
  // descl2 = ``regression_l1``, L1 loss, aliases: ``l1``, ``mean_absolute_error``, ``mae``
  // descl2 = ``huber``, `Huber loss <https://en.wikipedia.org/wiki/Huber_loss>`__
  // descl2 = ``fair``, `Fair loss <https://www.kaggle.com/c/allstate-claims-severity/discussion/24520>`__
  // descl2 = ``poisson``, `Poisson regression <https://en.wikipedia.org/wiki/Poisson_regression>`__
  // descl2 = ``quantile``, `Quantile regression <https://en.wikipedia.org/wiki/Quantile_regression>`__
  // descl2 = ``mape``, `MAPE loss <https://en.wikipedia.org/wiki/Mean_absolute_percentage_error>`__, aliases: ``mean_absolute_percentage_error``
  // descl2 = ``gamma``, Gamma regression with log-link. It might be useful, e.g., for modeling insurance claims severity, or for any target that might be `gamma-distributed <https://en.wikipedia.org/wiki/Gamma_distribution#Occurrence_and_applications>`__
  // descl2 = ``tweedie``, Tweedie regression with log-link. It might be useful, e.g., for modeling total loss in insurance, or for any target that might be `tweedie-distributed <https://en.wikipedia.org/wiki/Tweedie_distribution#Occurrence_and_applications>`__
  // desc = binary classification application
  // descl2 = ``binary``, binary `log loss <https://en.wikipedia.org/wiki/Cross_entropy>`__ classification (or logistic regression)
  // descl2 = requires labels in {0, 1}; see ``cross-entropy`` application for general probability labels in [0, 1]
  // desc = multi-class classification application
  // descl2 = ``multiclass``, `softmax <https://en.wikipedia.org/wiki/Softmax_function>`__ objective function, aliases: ``softmax``
  // descl2 = ``multiclassova``, `One-vs-All <https://en.wikipedia.org/wiki/Multiclass_classification#One-vs.-rest>`__ binary objective function, aliases: ``multiclass_ova``, ``ova``, ``ovr``
  // descl2 = ``num_class`` should be set as well
  // desc = cross-entropy application
  // descl2 = ``cross_entropy``, objective function for cross-entropy (with optional linear weights), aliases: ``xentropy``
  // descl2 = ``cross_entropy_lambda``, alternative parameterization of cross-entropy, aliases: ``xentlambda``
  // descl2 = label is anything in interval [0, 1]
  // desc = ranking application
  // descl2 = ``lambdarank``, `lambdarank <https://proceedings.neurips.cc/paper_files/paper/2006/file/af44c4c56f385c43f2529f9b1b018f6a-Paper.pdf>`__ objective. `label_gain <#label_gain>`__ can be used to set the gain (weight) of ``int`` label and all values in ``label`` must be smaller than number of elements in ``label_gain``
  // descl2 = ``rank_xendcg``, `XE_NDCG_MART <https://arxiv.org/abs/1911.09798>`__ ranking objective function, aliases: ``xendcg``, ``xe_ndcg``, ``xe_ndcg_mart``, ``xendcg_mart``
  // descl2 = ``rank_xendcg`` is faster than ``lambdarank`` and achieves similar performance
  // descl2 = label should be ``int`` type, and larger number represents the higher relevance (e.g. 0:bad, 1:fair, 2:good, 3:perfect)
  // desc = custom objective function (gradients and hessians not computed directly by LightGBM)
  // descl2 = ``custom``
  // descl2 = **Note**: Not supported in CLI version
  // descl2 = must be passed through parameters explicitly in the C API
  std::string objective = "regression";

  // [no-automatically-extract]
  // [no-save]
  // type = enum
  // alias = boosting_type, boost
  // options = gbdt, rf, dart
  // desc = ``gbdt``, traditional Gradient Boosting Decision Tree, aliases: ``gbrt``
  // desc = ``rf``, Random Forest, aliases: ``random_forest``
  // desc = ``dart``, `Dropouts meet Multiple Additive Regression Trees <https://arxiv.org/abs/1505.01866>`__
  // descl2 = **Note**: internally, LightGBM uses ``gbdt`` mode for the first ``1 / learning_rate`` iterations
  std::string boosting = "gbdt";

  // [no-automatically-extract]
  // type = enum
  // options = bagging, goss
  // desc = ``bagging``, Randomly Bagging Sampling
  // descl2 = **Note**: ``bagging`` is only effective when ``bagging_freq > 0`` and ``bagging_fraction < 1.0``
  // desc = ``goss``, Gradient-based One-Side Sampling
  // desc = *New in 4.0.0*
  std::string data_sample_strategy = "bagging";

  // alias = train, train_data, train_data_file, data_filename
  // desc = path of training data, LightGBM will train from this data
  // desc = **Note**: can be used only in CLI version
  std::string data = "";

  // alias = test, valid_data, valid_data_file, test_data, test_data_file, valid_filenames
  // default = ""
  // desc = path(s) of validation/test data, LightGBM will output metrics for these data
  // desc = support multiple validation data, separated by ``,``
  // desc = **Note**: can be used only in CLI version
  std::vector<std::string> valid;

  // alias = num_iteration, n_iter, num_tree, num_trees, num_round, num_rounds, nrounds, num_boost_round, n_estimators, max_iter
  // check = >=0
  // desc = number of boosting iterations
  // desc = **Note**: internally, LightGBM constructs ``num_class * num_iterations`` trees for multi-class classification problems
  int num_iterations = 100;

  // alias = shrinkage_rate, eta
  // check = >0.0
  // desc = shrinkage rate
  // desc = in ``dart``, it also affects the normalization weights of dropped trees
  double learning_rate = 0.1;

  // default = 31
  // alias = num_leaf, max_leaves, max_leaf, max_leaf_nodes
  // check = >1
  // check = <=131072
  // desc = max number of leaves in one tree
  int num_leaves = kDefaultNumLeaves;

  // [no-automatically-extract]
  // [no-save]
  // type = enum
  // options = serial, feature, data, voting
  // alias = tree, tree_type, tree_learner_type
  // desc = ``serial``, single machine tree learner
  // desc = ``feature``, feature parallel tree learner, aliases: ``feature_parallel``
  // desc = ``data``, data parallel tree learner, aliases: ``data_parallel``
  // desc = ``voting``, voting parallel tree learner, aliases: ``voting_parallel``
  // desc = refer to `Distributed Learning Guide <./Parallel-Learning-Guide.rst>`__ to get more details
  std::string tree_learner = "serial";

  // alias = num_thread, nthread, nthreads, n_jobs
  // desc = used only in ``train``, ``prediction`` and ``refit`` tasks or in correspondent functions of language-specific packages
  // desc = number of threads for LightGBM
  // desc = ``0`` means default number of threads in OpenMP
  // desc = for the best speed, set this to the number of **real CPU cores**, not the number of threads (most CPUs use `hyper-threading <https://en.wikipedia.org/wiki/Hyper-threading>`__ to generate 2 threads per CPU core)
  // desc = do not set it too large if your dataset is small (for instance, do not use 64 threads for a dataset with 10,000 rows)
  // desc = be aware that a task manager or any similar CPU monitoring tool might report that cores are not being fully utilized. **This is normal**
  // desc = for distributed learning, do not use all CPU cores because this will cause poor performance for the network communication
  // desc = **Note**: please **don't** change this during training, especially when running multiple jobs simultaneously by external packages, otherwise it may cause undesirable errors
  int num_threads = 0;

  // [no-automatically-extract]
  // [no-save]
  // type = enum
  // options = cpu, gpu, cuda
  // alias = device
  // desc = device for the tree learning
  // desc = ``cpu`` supports all LightGBM functionality and is portable across the widest range of operating systems and hardware
  // desc = ``cuda`` offers faster training than ``gpu`` or ``cpu``, but only works on GPUs supporting CUDA
  // desc = ``gpu`` can be faster than ``cpu`` and works on a wider range of GPUs than CUDA
  // desc = **Note**: it is recommended to use a smaller ``max_bin`` (e.g. 63) to get a better speed-up
  // desc = **Note**: for faster speed, GPU uses 32-bit floating point to sum up by default, so this may affect the accuracy for some tasks. You can set ``gpu_use_dp=true`` to enable 64-bit floating point, but it will slow down the training
  // desc = **Note**: refer to `Installation Guide <./Installation-Guide.rst#build-gpu-version>`__ to build LightGBM with GPU support
  std::string device_type = "cpu";

  // [no-automatically-extract]
  // alias = random_seed, random_state
  // default = None
  // desc = this seed is used to generate other seeds, e.g. ``data_random_seed``, ``feature_fraction_seed``, etc.
  // desc = by default, this seed is unused in favor of default values of other seeds
  // desc = this seed has lower priority in comparison with other seeds, which means that it will be overridden, if you set other seeds explicitly
  int seed = 0;

  // desc = used only with ``cpu`` device type
  // desc = setting this to ``true`` should ensure stable results when using the same data and the same parameters (and different ``num_threads``)
  // desc = when you use different seeds, different LightGBM versions, binaries compiled by different compilers, or different systems, the results are expected to be different
  // desc = you can `raise issues <https://github.com/microsoft/LightGBM/issues>`__ in the LightGBM GitHub repo when you encounter unstable results
  // desc = **Note**: setting this to ``true`` may slow down the training
  // desc = **Note**: to avoid potential instability due to numerical issues, please set ``force_col_wise=true`` or ``force_row_wise=true`` when setting ``deterministic=true``
  bool deterministic = false;

  #ifndef __NVCC__
  #pragma endregion

  #pragma region Learning Control Parameters
  #endif  // __NVCC__

  // desc = used only with ``cpu`` device type
  // desc = set this to ``true`` to force col-wise histogram building
  // desc = enabling this is recommended when:
  // descl2 = the number of columns is large, or the total number of bins is large
  // descl2 = ``num_threads`` is large, e.g. ``> 20``
  // descl2 = you want to reduce memory cost
  // desc = **Note**: when both ``force_col_wise`` and ``force_row_wise`` are ``false``, LightGBM will first try them both, and then use the faster one. To remove the overhead of testing, set the faster one to ``true`` manually
  // desc = **Note**: this parameter cannot be used at the same time with ``force_row_wise``, choose only one of them
  bool force_col_wise = false;

  // desc = used only with ``cpu`` device type
  // desc = set this to ``true`` to force row-wise histogram building
  // desc = enabling this is recommended when:
  // descl2 = the number of data points is large, and the total number of bins is relatively small
  // descl2 = ``num_threads`` is relatively small, e.g. ``<= 16``
  // descl2 = you want to use small ``bagging_fraction`` or ``goss`` sample strategy to speed up
  // desc = **Note**: setting this to ``true`` will double the memory cost for the Dataset object. If you do not have enough memory, you can try setting ``force_col_wise=true``
  // desc = **Note**: when both ``force_col_wise`` and ``force_row_wise`` are ``false``, LightGBM will first try them both, and then use the faster one. To remove the overhead of testing, set the faster one to ``true`` manually
  // desc = **Note**: this parameter cannot be used at the same time with ``force_col_wise``, choose only one of them
  bool force_row_wise = false;

  // alias = hist_pool_size
  // desc = max cache size in MB for historical histogram
  // desc = ``< 0`` means no limit
  double histogram_pool_size = -1.0;

  // desc = limit the max depth for tree model. This is used to deal with over-fitting when ``#data`` is small. Tree still grows leaf-wise
  // desc = ``<= 0`` means no limit
  int max_depth = -1;

  // alias = min_data_per_leaf, min_data, min_child_samples, min_samples_leaf
  // check = >=0
  // desc = minimal number of data in one leaf. Can be used to deal with over-fitting
  // desc = **Note**: this is an approximation based on the Hessian, so occasionally you may observe splits which produce leaf nodes that have less than this many observations
  int min_data_in_leaf = 20;

  // alias = min_sum_hessian_per_leaf, min_sum_hessian, min_hessian, min_child_weight
  // check = >=0.0
  // desc = minimal sum hessian in one leaf. Like ``min_data_in_leaf``, it can be used to deal with over-fitting
  double min_sum_hessian_in_leaf = 1e-3;

  // alias = sub_row, subsample, bagging
  // check = >0.0
  // check = <=1.0
  // desc = like ``feature_fraction``, but this will randomly select part of data without resampling
  // desc = can be used to speed up training
  // desc = can be used to deal with over-fitting
  // desc = **Note**: to enable bagging, ``bagging_freq`` should be set to a non zero value as well
  double bagging_fraction = 1.0;

  // alias = pos_sub_row, pos_subsample, pos_bagging
  // check = >0.0
  // check = <=1.0
  // desc = used only in ``binary`` application
  // desc = used for imbalanced binary classification problem, will randomly sample ``#pos_samples * pos_bagging_fraction`` positive samples in bagging
  // desc = should be used together with ``neg_bagging_fraction``
  // desc = set this to ``1.0`` to disable
  // desc = **Note**: to enable this, you need to set ``bagging_freq`` and ``neg_bagging_fraction`` as well
  // desc = **Note**: if both ``pos_bagging_fraction`` and ``neg_bagging_fraction`` are set to ``1.0``,  balanced bagging is disabled
  // desc = **Note**: if balanced bagging is enabled, ``bagging_fraction`` will be ignored
  double pos_bagging_fraction = 1.0;

  // alias = neg_sub_row, neg_subsample, neg_bagging
  // check = >0.0
  // check = <=1.0
  // desc = used only in ``binary`` application
  // desc = used for imbalanced binary classification problem, will randomly sample ``#neg_samples * neg_bagging_fraction`` negative samples in bagging
  // desc = should be used together with ``pos_bagging_fraction``
  // desc = set this to ``1.0`` to disable
  // desc = **Note**: to enable this, you need to set ``bagging_freq`` and ``pos_bagging_fraction`` as well
  // desc = **Note**: if both ``pos_bagging_fraction`` and ``neg_bagging_fraction`` are set to ``1.0``,  balanced bagging is disabled
  // desc = **Note**: if balanced bagging is enabled, ``bagging_fraction`` will be ignored
  double neg_bagging_fraction = 1.0;

  // alias = subsample_freq
  // desc = frequency for bagging
  // desc = ``0`` means disable bagging; ``k`` means perform bagging at every ``k`` iteration. Every ``k``-th iteration, LightGBM will randomly select ``bagging_fraction * 100 %`` of the data to use for the next ``k`` iterations
  // desc = **Note**: bagging is only effective when ``0.0 < bagging_fraction < 1.0``
  int bagging_freq = 0;

  // alias = bagging_fraction_seed
  // desc = random seed for bagging
  int bagging_seed = 3;

  // alias = sub_feature, colsample_bytree
  // check = >0.0
  // check = <=1.0
  // desc = LightGBM will randomly select a subset of features on each iteration (tree) if ``feature_fraction`` is smaller than ``1.0``. For example, if you set it to ``0.8``, LightGBM will select 80% of features before training each tree
  // desc = can be used to speed up training
  // desc = can be used to deal with over-fitting
  double feature_fraction = 1.0;

  // alias = sub_feature_bynode, colsample_bynode
  // check = >0.0
  // check = <=1.0
  // desc = LightGBM will randomly select a subset of features on each tree node if ``feature_fraction_bynode`` is smaller than ``1.0``. For example, if you set it to ``0.8``, LightGBM will select 80% of features at each tree node
  // desc = can be used to deal with over-fitting
  // desc = **Note**: unlike ``feature_fraction``, this cannot speed up training
  // desc = **Note**: if both ``feature_fraction`` and ``feature_fraction_bynode`` are smaller than ``1.0``, the final fraction of each node is ``feature_fraction * feature_fraction_bynode``
  double feature_fraction_bynode = 1.0;

  // desc = random seed for ``feature_fraction``
  int feature_fraction_seed = 2;

  // alias = extra_tree
  // desc = use extremely randomized trees
  // desc = if set to ``true``, when evaluating node splits LightGBM will check only one randomly-chosen threshold for each feature
  // desc = can be used to speed up training
  // desc = can be used to deal with over-fitting
  bool extra_trees = false;

  // desc = random seed for selecting thresholds when ``extra_trees`` is true
  int extra_seed = 6;

  // alias = early_stopping_rounds, early_stopping, n_iter_no_change
  // desc = will stop training if one metric of one validation data doesn't improve in last ``early_stopping_round`` rounds
  // desc = ``<= 0`` means disable
  // desc = can be used to speed up training
  int early_stopping_round = 0;

  // desc = LightGBM allows you to provide multiple evaluation metrics. Set this to ``true``, if you want to use only the first metric for early stopping
  bool first_metric_only = false;

  // alias = max_tree_output, max_leaf_output
  // desc = used to limit the max output of tree leaves
  // desc = ``<= 0`` means no constraint
  // desc = the final max output of leaves is ``learning_rate * max_delta_step``
  double max_delta_step = 0.0;

  // alias = reg_alpha, l1_regularization
  // check = >=0.0
  // desc = L1 regularization
  double lambda_l1 = 0.0;

  // alias = reg_lambda, lambda, l2_regularization
  // check = >=0.0
  // desc = L2 regularization
  double lambda_l2 = 0.0;

  // check = >=0.0
  // desc = linear tree regularization, corresponds to the parameter ``lambda`` in Eq. 3 of `Gradient Boosting with Piece-Wise Linear Regression Trees <https://arxiv.org/pdf/1802.05640.pdf>`__
  double linear_lambda = 0.0;

  // alias = min_split_gain
  // check = >=0.0
  // desc = the minimal gain to perform split
  // desc = can be used to speed up training
  double min_gain_to_split = 0.0;

  // alias = rate_drop
  // check = >=0.0
  // check = <=1.0
  // desc = used only in ``dart``
  // desc = dropout rate: a fraction of previous trees to drop during the dropout
  double drop_rate = 0.1;

  // desc = used only in ``dart``
  // desc = max number of dropped trees during one boosting iteration
  // desc = ``<=0`` means no limit
  int max_drop = 50;

  // check = >=0.0
  // check = <=1.0
  // desc = used only in ``dart``
  // desc = probability of skipping the dropout procedure during a boosting iteration
  double skip_drop = 0.5;

  // desc = used only in ``dart``
  // desc = set this to ``true``, if you want to use xgboost dart mode
  bool xgboost_dart_mode = false;

  // desc = used only in ``dart``
  // desc = set this to ``true``, if you want to use uniform drop
  bool uniform_drop = false;

  // desc = used only in ``dart``
  // desc = random seed to choose dropping models
  int drop_seed = 4;

  // check = >=0.0
  // check = <=1.0
  // desc = used only in ``goss``
  // desc = the retain ratio of large gradient data
  double top_rate = 0.2;

  // check = >=0.0
  // check = <=1.0
  // desc = used only in ``goss``
  // desc = the retain ratio of small gradient data
  double other_rate = 0.1;

  // check = >0
  // desc = minimal number of data per categorical group
  int min_data_per_group = 100;

  // check = >0
  // desc = used for the categorical features
  // desc = limit number of split points considered for categorical features. See `the documentation on how LightGBM finds optimal splits for categorical features <./Features.rst#optimal-split-for-categorical-features>`_ for more details
  // desc = can be used to speed up training
  int max_cat_threshold = 32;

  // check = >=0.0
  // desc = used for the categorical features
  // desc = L2 regularization in categorical split
  double cat_l2 = 10.0;

  // check = >=0.0
  // desc = used for the categorical features
  // desc = this can reduce the effect of noises in categorical features, especially for categories with few data
  double cat_smooth = 10.0;

  // check = >0
  // desc = when number of categories of one feature smaller than or equal to ``max_cat_to_onehot``, one-vs-other split algorithm will be used
  int max_cat_to_onehot = 4;

  // alias = topk
  // check = >0
  // desc = used only in ``voting`` tree learner, refer to `Voting parallel <./Parallel-Learning-Guide.rst#choose-appropriate-parallel-algorithm>`__
  // desc = set this to larger value for more accurate result, but it will slow down the training speed
  int top_k = 20;

  // type = multi-int
  // alias = mc, monotone_constraint, monotonic_cst
  // default = None
  // desc = used for constraints of monotonic features
  // desc = ``1`` means increasing, ``-1`` means decreasing, ``0`` means non-constraint
  // desc = you need to specify all features in order. For example, ``mc=-1,0,1`` means decreasing for 1st feature, non-constraint for 2nd feature and increasing for the 3rd feature
  std::vector<int8_t> monotone_constraints;

  // type = enum
  // alias = monotone_constraining_method, mc_method
  // options = basic, intermediate, advanced
  // desc = used only if ``monotone_constraints`` is set
  // desc = monotone constraints method
  // descl2 = ``basic``, the most basic monotone constraints method. It does not slow the library at all, but over-constrains the predictions
  // descl2 = ``intermediate``, a `more advanced method <https://hal.science/hal-02862802/document>`__, which may slow the library very slightly. However, this method is much less constraining than the basic method and should significantly improve the results
  // descl2 = ``advanced``, an `even more advanced method <https://hal.science/hal-02862802/document>`__, which may slow the library. However, this method is even less constraining than the intermediate method and should again significantly improve the results
  std::string monotone_constraints_method = "basic";

  // alias = monotone_splits_penalty, ms_penalty, mc_penalty
  // check = >=0.0
  // desc = used only if ``monotone_constraints`` is set
  // desc = `monotone penalty <https://hal.science/hal-02862802/document>`__: a penalization parameter X forbids any monotone splits on the first X (rounded down) level(s) of the tree. The penalty applied to monotone splits on a given depth is a continuous, increasing function of the penalization parameter
  // desc = if ``0.0`` (the default), no penalization is applied
  double monotone_penalty = 0.0;

  // type = multi-double
  // alias = feature_contrib, fc, fp, feature_penalty
  // default = None
  // desc = used to control feature's split gain, will use ``gain[i] = max(0, feature_contri[i]) * gain[i]`` to replace the split gain of i-th feature
  // desc = you need to specify all features in order
  std::vector<double> feature_contri;

  // alias = fs, forced_splits_filename, forced_splits_file, forced_splits
  // desc = path to a ``.json`` file that specifies splits to force at the top of every decision tree before best-first learning commences
  // desc = ``.json`` file can be arbitrarily nested, and each split contains ``feature``, ``threshold`` fields, as well as ``left`` and ``right`` fields representing subsplits
  // desc = categorical splits are forced in a one-hot fashion, with ``left`` representing the split containing the feature value and ``right`` representing other values
  // desc = **Note**: the forced split logic will be ignored, if the split makes gain worse
  // desc = see `this file <https://github.com/microsoft/LightGBM/blob/master/examples/binary_classification/forced_splits.json>`__ as an example
  std::string forcedsplits_filename = "";

  // check = >=0.0
  // check = <=1.0
  // desc = decay rate of ``refit`` task, will use ``leaf_output = refit_decay_rate * old_leaf_output + (1.0 - refit_decay_rate) * new_leaf_output`` to refit trees
  // desc = used only in ``refit`` task in CLI version or as argument in ``refit`` function in language-specific package
  double refit_decay_rate = 0.9;

  // check = >=0.0
  // desc = cost-effective gradient boosting multiplier for all penalties
  double cegb_tradeoff = 1.0;

  // check = >=0.0
  // desc = cost-effective gradient-boosting penalty for splitting a node
  double cegb_penalty_split = 0.0;

  // type = multi-double
  // default = 0,0,...,0
  // desc = cost-effective gradient boosting penalty for using a feature
  // desc = applied per data point
  std::vector<double> cegb_penalty_feature_lazy;

  // type = multi-double
  // default = 0,0,...,0
  // desc = cost-effective gradient boosting penalty for using a feature
  // desc = applied once per forest
  std::vector<double> cegb_penalty_feature_coupled;

  // check = >= 0.0
  // desc = controls smoothing applied to tree nodes
  // desc = helps prevent overfitting on leaves with few samples
  // desc = if set to zero, no smoothing is applied
  // desc = if ``path_smooth > 0`` then ``min_data_in_leaf`` must be at least ``2``
  // desc = larger values give stronger regularization
  // descl2 = the weight of each node is ``w * (n / path_smooth) / (n / path_smooth + 1) + w_p / (n / path_smooth + 1)``, where ``n`` is the number of samples in the node, ``w`` is the optimal node weight to minimise the loss (approximately ``-sum_gradients / sum_hessians``), and ``w_p`` is the weight of the parent node
  // descl2 = note that the parent output ``w_p`` itself has smoothing applied, unless it is the root node, so that the smoothing effect accumulates with the tree depth
  double path_smooth = 0;

  // desc = controls which features can appear in the same branch
  // desc = by default interaction constraints are disabled, to enable them you can specify
  // descl2 = for CLI, lists separated by commas, e.g. ``[0,1,2],[2,3]``
  // descl2 = for Python-package, list of lists, e.g. ``[[0, 1, 2], [2, 3]]``
  // descl2 = for R-package, list of character or numeric vectors, e.g. ``list(c("var1", "var2", "var3"), c("var3", "var4"))`` or ``list(c(1L, 2L, 3L), c(3L, 4L))``. Numeric vectors should use 1-based indexing, where ``1L`` is the first feature, ``2L`` is the second feature, etc
  // desc = any two features can appear in the same branch only if there exists a constraint containing both features
  std::string interaction_constraints = "";

  // alias = verbose
  // desc = controls the level of LightGBM's verbosity
  // desc = ``< 0``: Fatal, ``= 0``: Error (Warning), ``= 1``: Info, ``> 1``: Debug
  int verbosity = 1;

  // [no-save]
  // alias = model_input, model_in
  // desc = filename of input model
  // desc = for ``prediction`` task, this model will be applied to prediction data
  // desc = for ``train`` task, training will be continued from this model
  // desc = **Note**: can be used only in CLI version
  std::string input_model = "";

  // [no-save]
  // alias = model_output, model_out
  // desc = filename of output model in training
  // desc = **Note**: can be used only in CLI version
  std::string output_model = "LightGBM_model.txt";

  // desc = the feature importance type in the saved model file
  // desc = ``0``: count-based feature importance (numbers of splits are counted); ``1``: gain-based feature importance (values of gain are counted)
  // desc = **Note**: can be used only in CLI version
  int saved_feature_importance_type = 0;

  // [no-save]
  // alias = save_period
  // desc = frequency of saving model file snapshot
  // desc = set this to positive value to enable this function. For example, the model file will be snapshotted at each iteration if ``snapshot_freq=1``
  // desc = **Note**: can be used only in CLI version
  int snapshot_freq = -1;

  // desc = whether to use gradient quantization when training
  // desc = enabling this will discretize (quantize) the gradients and hessians into bins of ``num_grad_quant_bins``
  // desc = with quantized training, most of the arithmetic in the training process will use integer operations
  // desc = gradient quantization can accelerate training, with little accuracy drop in most cases
  // desc = **Note**: can be used only with ``device_type = cpu``
  // desc = *New in version 4.0.0*
  bool use_quantized_grad = false;

  // desc = number of bins used to quantize gradients and hessians
  // desc = with more bins, the quantized training will be closer to full precision training
  // desc = **Note**: can be used only with ``device_type = cpu``
  // desc = *New in 4.0.0*
  int num_grad_quant_bins = 4;

  // desc = whether to renew the leaf values with the original gradients when using quantized training
  // desc = renewing is very helpful for good quantized training accuracy for ranking objectives
  // desc = **Note**: can be used only with ``device_type = cpu``
  // desc = *New in 4.0.0*
  bool quant_train_renew_leaf = false;

  // desc = whether to use stochastic rounding in gradient quantization
  // desc = *New in 4.0.0*
  bool stochastic_rounding = true;

  #ifndef __NVCC__
  #pragma endregion

  #pragma region IO Parameters

  #pragma region Dataset Parameters
  #endif  // __NVCC__

  // alias = linear_trees
  // desc = fit piecewise linear gradient boosting tree
  // descl2 = tree splits are chosen in the usual way, but the model at each leaf is linear instead of constant
  // descl2 = the linear model at each leaf includes all the numerical features in that leaf's branch
  // descl2 = the first tree has constant leaf values
  // descl2 = categorical features are used for splits as normal but are not used in the linear models
  // descl2 = missing values should not be encoded as ``0``. Use ``np.nan`` for Python, ``NA`` for the CLI, and ``NA``, ``NA_real_``, or ``NA_integer_`` for R
  // descl2 = it is recommended to rescale data before training so that features have similar mean and standard deviation
  // descl2 = **Note**: only works with CPU and ``serial`` tree learner
  // descl2 = **Note**: ``regression_l1`` objective is not supported with linear tree boosting
  // descl2 = **Note**: setting ``linear_tree=true`` significantly increases the memory use of LightGBM
  // descl2 = **Note**: if you specify ``monotone_constraints``, constraints will be enforced when choosing the split points, but not when fitting the linear models on leaves
  bool linear_tree = false;

  // alias = max_bins
  // check = >1
  // desc = max number of bins that feature values will be bucketed in
  // desc = a small number of bins may reduce training accuracy but may increase generalization power (deal with over-fitting)
  // desc = LightGBM will auto compress memory according to ``max_bin``. For example, LightGBM will use ``uint8_t`` for feature value if ``max_bin=255``
  int max_bin = 255;

  // type = multi-int
  // default = None
  // desc = max number of bins for each feature
  // desc = if not specified, will use ``max_bin`` for all features
  std::vector<int32_t> max_bin_by_feature;

  // check = >0
  // desc = minimal number of data inside one bin
  // desc = use this to avoid one-data-one-bin (potential over-fitting)
  int min_data_in_bin = 3;

  // alias = subsample_for_bin
  // check = >0
  // desc = number of data points that are sampled to construct feature discrete bins
  // desc = setting this to a larger value will give a better training result, but may increase data loading time
  // desc = set this to larger value if data is very sparse
  // desc = **Note**: don't set this to small values, otherwise, you may encounter unexpected errors and poor accuracy
  int bin_construct_sample_cnt = 200000;

  // alias = data_seed
  // desc = random seed for sampling data to construct histogram bins
  int data_random_seed = 1;

  // alias = is_sparse, enable_sparse, sparse
  // desc = used to enable/disable sparse optimization
  bool is_enable_sparse = true;

  // alias = is_enable_bundle, bundle
  // desc = set this to ``false`` to disable Exclusive Feature Bundling (EFB), which is described in `LightGBM: A Highly Efficient Gradient Boosting Decision Tree <https://papers.nips.cc/paper_files/paper/2017/hash/6449f44a102fde848669bdd9eb6b76fa-Abstract.html>`__
  // desc = **Note**: disabling this may cause slow training speed for sparse datasets
  bool enable_bundle = true;

  // desc = set this to ``false`` to disable the special handling of missing values
  bool use_missing = true;

  // desc = set this to ``true`` to treat all zero as missing values (including the unshown values in LibSVM / sparse matrices)
  // desc = set this to ``false`` to use ``na`` for representing missing values
  bool zero_as_missing = false;

  // desc = set this to ``true`` (the default) to tell LightGBM to ignore the features that are unsplittable based on ``min_data_in_leaf``
  // desc = as dataset object is initialized only once and cannot be changed after that, you may need to set this to ``false`` when searching parameters with ``min_data_in_leaf``, otherwise features are filtered by ``min_data_in_leaf`` firstly if you don't reconstruct dataset object
  // desc = **Note**: setting this to ``false`` may slow down the training
  bool feature_pre_filter = true;

  // alias = is_pre_partition
  // desc = used for distributed learning (excluding the ``feature_parallel`` mode)
  // desc = ``true`` if training data are pre-partitioned, and different machines use different partitions
  bool pre_partition = false;

  // alias = two_round_loading, use_two_round_loading
  // desc = set this to ``true`` if data file is too big to fit in memory
  // desc = by default, LightGBM will map the data file to memory and load features from memory. This provides faster data loading, but may cause an out-of-memory error when the data file is very big
  // desc = **Note**: works only in case of loading data directly from text file
  bool two_round = false;

  // alias = has_header
  // desc = set this to ``true`` if input data has header
  // desc = **Note**: works only in case of loading data directly from text file
  bool header = false;

  // type = int or string
  // alias = label
  // desc = used to specify the label column
  // desc = use number for index, e.g. ``label=0`` means column\_0 is the label
  // desc = add a prefix ``name:`` for column name, e.g. ``label=name:is_click``
  // desc = if omitted, the first column in the training data is used as the label
  // desc = **Note**: works only in case of loading data directly from text file
  std::string label_column = "";

  // type = int or string
  // alias = weight
  // desc = used to specify the weight column
  // desc = use number for index, e.g. ``weight=0`` means column\_0 is the weight
  // desc = add a prefix ``name:`` for column name, e.g. ``weight=name:weight``
  // desc = **Note**: works only in case of loading data directly from text file
  // desc = **Note**: index starts from ``0`` and it doesn't count the label column when passing type is ``int``, e.g. when label is column\_0, and weight is column\_1, the correct parameter is ``weight=0``
  // desc = **Note**: weights should be non-negative
  std::string weight_column = "";

  // type = int or string
  // alias = group, group_id, query_column, query, query_id
  // desc = used to specify the query/group id column
  // desc = use number for index, e.g. ``query=0`` means column\_0 is the query id
  // desc = add a prefix ``name:`` for column name, e.g. ``query=name:query_id``
  // desc = **Note**: works only in case of loading data directly from text file
  // desc = **Note**: data should be grouped by query\_id, for more information, see `Query Data <#query-data>`__
  // desc = **Note**: index starts from ``0`` and it doesn't count the label column when passing type is ``int``, e.g. when label is column\_0 and query\_id is column\_1, the correct parameter is ``query=0``
  std::string group_column = "";

  // type = multi-int or string
  // alias = ignore_feature, blacklist
  // desc = used to specify columns to be ignored in training
  // desc = use number for index, e.g. ``ignore_column=0,1,2`` means column\_0, column\_1 and column\_2 will be ignored
  // desc = add a prefix ``name:`` for column name, e.g. ``ignore_column=name:c1,c2,c3`` means c1, c2 and c3 will be ignored
  // desc = **Note**: works only in case of loading data directly from text file
  // desc = **Note**: index starts from ``0`` and it doesn't count the label column when passing type is ``int``
  // desc = **Note**: despite the fact that specified columns will be completely ignored during the training, they still should have a valid format allowing LightGBM to load file successfully
  std::string ignore_column = "";

  // type = multi-int or string
  // alias = cat_feature, categorical_column, cat_column, categorical_features
  // desc = used to specify categorical features
  // desc = use number for index, e.g. ``categorical_feature=0,1,2`` means column\_0, column\_1 and column\_2 are categorical features
  // desc = add a prefix ``name:`` for column name, e.g. ``categorical_feature=name:c1,c2,c3`` means c1, c2 and c3 are categorical features
  // desc = **Note**: all values will be cast to ``int32`` (integer codes will be extracted from pandas categoricals in the Python-package)
  // desc = **Note**: index starts from ``0`` and it doesn't count the label column when passing type is ``int``
  // desc = **Note**: all values should be less than ``Int32.MaxValue`` (2147483647)
  // desc = **Note**: using large values could be memory consuming. Tree decision rule works best when categorical features are represented by consecutive integers starting from zero
  // desc = **Note**: all negative values will be treated as **missing values**
  // desc = **Note**: the output cannot be monotonically constrained with respect to a categorical feature
  // desc = **Note**: floating point numbers in categorical features will be rounded towards 0
  std::string categorical_feature = "";

  // desc = path to a ``.json`` file that specifies bin upper bounds for some or all features
  // desc = ``.json`` file should contain an array of objects, each containing the word ``feature`` (integer feature index) and ``bin_upper_bound`` (array of thresholds for binning)
  // desc = see `this file <https://github.com/microsoft/LightGBM/blob/master/examples/regression/forced_bins.json>`__ as an example
  std::string forcedbins_filename = "";

  // [no-save]
  // alias = is_save_binary, is_save_binary_file
  // desc = if ``true``, LightGBM will save the dataset (including validation data) to a binary file. This speeds up data loading the next time
  // desc = **Note**: ``init_score`` is not saved in binary file
  // desc = **Note**: can be used only in CLI version; for language-specific packages you can use the correspondent function
  bool save_binary = false;

  // desc = use precise floating point number parsing for text parser (e.g. CSV, TSV, LibSVM input)
  // desc = **Note**: setting this to ``true`` may lead to much slower text parsing
  bool precise_float_parser = false;

  // desc = path to a ``.json`` file that specifies the customized parser initialization configuration
  // desc = see `lightgbm-transform <https://github.com/microsoft/lightgbm-transform>`__ for usage examples
  // desc = **Note**: ``lightgbm-transform`` is not maintained by LightGBM's maintainers. Bug reports or feature requests should go to `issues page <https://github.com/microsoft/lightgbm-transform/issues>`__
  // desc = *New in 4.0.0*
  std::string parser_config_file = "";

  #ifndef __NVCC__
  #pragma endregion

  #pragma region Predict Parameters
  #endif  // __NVCC__

  // [no-save]
  // desc = used only in ``prediction`` task
  // desc = used to specify from which iteration to start the prediction
  // desc = ``<= 0`` means from the first iteration
  int start_iteration_predict = 0;

  // [no-save]
  // desc = used only in ``prediction`` task
  // desc = used to specify how many trained iterations will be used in prediction
  // desc = ``<= 0`` means no limit
  int num_iteration_predict = -1;

  // [no-save]
  // alias = is_predict_raw_score, predict_rawscore, raw_score
  // desc = used only in ``prediction`` task
  // desc = set this to ``true`` to predict only the raw scores
  // desc = set this to ``false`` to predict transformed scores
  bool predict_raw_score = false;

  // [no-save]
  // alias = is_predict_leaf_index, leaf_index
  // desc = used only in ``prediction`` task
  // desc = set this to ``true`` to predict with leaf index of all trees
  bool predict_leaf_index = false;

  // [no-save]
  // alias = is_predict_contrib, contrib
  // desc = used only in ``prediction`` task
  // desc = set this to ``true`` to estimate `SHAP values <https://arxiv.org/abs/1706.06060>`__, which represent how each feature contributes to each prediction
  // desc = produces ``#features + 1`` values where the last value is the expected value of the model output over the training data
  // desc = **Note**: if you want to get more explanation for your model's predictions using SHAP values like SHAP interaction values, you can install `shap package <https://github.com/shap>`__
  // desc = **Note**: unlike the shap package, with ``predict_contrib`` we return a matrix with an extra column, where the last column is the expected value
  // desc = **Note**: this feature is not implemented for linear trees
  bool predict_contrib = false;

  // [no-save]
  // desc = used only in ``prediction`` task
  // desc = control whether or not LightGBM raises an error when you try to predict on data with a different number of features than the training data
  // desc = if ``false`` (the default), a fatal error will be raised if the number of features in the dataset you predict on differs from the number seen during training
  // desc = if ``true``, LightGBM will attempt to predict on whatever data you provide. This is dangerous because you might get incorrect predictions, but you could use it in situations where it is difficult or expensive to generate some features and you are very confident that they were never chosen for splits in the model
  // desc = **Note**: be very careful setting this parameter to ``true``
  bool predict_disable_shape_check = false;

  // [no-save]
  // desc = used only in ``prediction`` task
  // desc = used only in ``classification`` and ``ranking`` applications
  // desc = used only for predicting normal or raw scores
  // desc = if ``true``, will use early-stopping to speed up the prediction. May affect the accuracy
  // desc = **Note**: cannot be used with ``rf`` boosting type or custom objective function
  bool pred_early_stop = false;

  // [no-save]
  // desc = used only in ``prediction`` task
  // desc = the frequency of checking early-stopping prediction
  int pred_early_stop_freq = 10;

  // [no-save]
  // desc = used only in ``prediction`` task
  // desc = the threshold of margin in early-stopping prediction
  double pred_early_stop_margin = 10.0;

  // [no-save]
  // alias = predict_result, prediction_result, predict_name, prediction_name, pred_name, name_pred
  // desc = used only in ``prediction`` task
  // desc = filename of prediction result
  // desc = **Note**: can be used only in CLI version
  std::string output_result = "LightGBM_predict_result.txt";

  #ifndef __NVCC__
  #pragma endregion

  #pragma region Convert Parameters
  #endif  // __NVCC__

  // [no-save]
  // desc = used only in ``convert_model`` task
883
  // desc = only ``cpp`` is supported yet; for conversion model to other languages consider using `m2cgen <https://github.com/BayesWitnesses/m2cgen>`__ utility
884
  // desc = if ``convert_model_language`` is set and ``task=train``, the model will be also converted
885
  // desc = **Note**: can be used only in CLI version
Guolin Ke's avatar
Guolin Ke committed
886
887
  std::string convert_model_language = "";

  // [no-save]
  // alias = convert_model_file
  // desc = used only in ``convert_model`` task
  // desc = output filename of converted model
  // desc = **Note**: can be used only in CLI version
  std::string convert_model = "gbdt_prediction.cpp";

  #ifndef __NVCC__
  #pragma endregion

  #pragma endregion

  #pragma region Objective Parameters
  #endif  // __NVCC__

  // desc = used only in ``rank_xendcg`` objective
  // desc = random seed for objectives, if a random process is needed
  int objective_seed = 5;

  // check = >0
  // alias = num_classes
  // desc = used only in ``multi-class`` classification application
  int num_class = 1;

  // alias = unbalance, unbalanced_sets
  // desc = used only in ``binary`` and ``multiclassova`` applications
  // desc = set this to ``true`` if training data are unbalanced
  // desc = **Note**: while enabling this should increase the overall performance metric of your model, it will also result in poor estimates of the individual class probabilities
  // desc = **Note**: this parameter cannot be used at the same time with ``scale_pos_weight``, choose only **one** of them
  bool is_unbalance = false;

  // check = >0.0
  // desc = used only in ``binary`` and ``multiclassova`` applications
  // desc = weight of labels with positive class
  // desc = **Note**: while enabling this should increase the overall performance metric of your model, it will also result in poor estimates of the individual class probabilities
  // desc = **Note**: this parameter cannot be used at the same time with ``is_unbalance``, choose only **one** of them
  double scale_pos_weight = 1.0;

  // check = >0.0
  // desc = used only in ``binary`` and ``multiclassova`` classification and in ``lambdarank`` applications
  // desc = parameter for the sigmoid function
  double sigmoid = 1.0;

  // desc = used only in ``regression``, ``binary``, ``multiclassova`` and ``cross-entropy`` applications
  // desc = adjusts initial score to the mean of labels for faster convergence
  bool boost_from_average = true;

  // desc = used only in ``regression`` application
  // desc = used to fit ``sqrt(label)`` instead of the original values; the prediction result will also be automatically converted back to ``prediction^2``
  // desc = might be useful in case of large-range labels
  bool reg_sqrt = false;

  // check = >0.0
  // desc = used only in ``huber`` and ``quantile`` ``regression`` applications
  // desc = parameter for `Huber loss <https://en.wikipedia.org/wiki/Huber_loss>`__ and `Quantile regression <https://en.wikipedia.org/wiki/Quantile_regression>`__
  double alpha = 0.9;

  // check = >0.0
  // desc = used only in ``fair`` ``regression`` application
  // desc = parameter for `Fair loss <https://www.kaggle.com/c/allstate-claims-severity/discussion/24520>`__
  double fair_c = 1.0;

  // check = >0.0
  // desc = used only in ``poisson`` ``regression`` application
  // desc = parameter for `Poisson regression <https://en.wikipedia.org/wiki/Poisson_regression>`__ to safeguard optimization
  double poisson_max_delta_step = 0.7;

  // check = >=1.0
  // check = <2.0
  // desc = used only in ``tweedie`` ``regression`` application
  // desc = used to control the variance of the tweedie distribution
  // desc = set this closer to ``2`` to shift towards a **Gamma** distribution
  // desc = set this closer to ``1`` to shift towards a **Poisson** distribution
  double tweedie_variance_power = 1.5;

  // check = >0
  // desc = used only in ``lambdarank`` application
  // desc = controls the number of top-results to focus on during training, refer to "truncation level" in Sec. 3 of the `LambdaMART paper <https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/MSR-TR-2010-82.pdf>`__
  // desc = this parameter is closely related to the desirable cutoff ``k`` in the metric **NDCG@k** that we aim at optimizing the ranker for. The optimal setting for this parameter is likely to be slightly higher than ``k`` (e.g., ``k + 3``) to include more pairs of documents to train on, but perhaps not too high to avoid deviating too much from the desired target metric **NDCG@k**
  int lambdarank_truncation_level = 30;

  // desc = used only in ``lambdarank`` application
  // desc = set this to ``true`` to normalize the lambdas for different queries, and improve the performance for unbalanced data
  // desc = set this to ``false`` to enforce the original lambdarank algorithm
  bool lambdarank_norm = true;

  // type = multi-double
  // default = 0,1,3,7,15,31,63,...,2^30-1
  // desc = used only in ``lambdarank`` application
  // desc = relevant gain for labels. For example, the gain of label ``2`` is ``3`` in case of default label gains
  // desc = separate by ``,``
  std::vector<double> label_gain;
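  // Illustrative example (not parsed by the docs generator): in a CLI config file the gains are
  // given as a comma-separated list, e.g. ``label_gain = 0,1,3,7`` for four relevance levels;
  // with the defaults, the gain of label ``k`` is ``2^k - 1``.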

  // check = >=0.0
  // desc = used only in ``lambdarank`` application when positional information is provided and position bias is modeled. Larger values reduce the inferred position bias factors.
  // desc = *New in version 4.1.0*
  double lambdarank_position_bias_regularization = 0.0;

  #ifndef __NVCC__
  #pragma endregion

  #pragma region Metric Parameters
  #endif  // __NVCC__

  // [no-automatically-extract]
  // [no-save]
  // alias = metrics, metric_types
  // default = ""
  // type = multi-enum
  // desc = metric(s) to be evaluated on the evaluation set(s)
  // descl2 = ``""`` (empty string or not specified) means that metric corresponding to specified ``objective`` will be used (this is possible only for pre-defined objective functions, otherwise no evaluation metric will be added)
  // descl2 = ``"None"`` (string, **not** a ``None`` value) means that no metric will be registered, aliases: ``na``, ``null``, ``custom``
  // descl2 = ``l1``, absolute loss, aliases: ``mean_absolute_error``, ``mae``, ``regression_l1``
  // descl2 = ``l2``, square loss, aliases: ``mean_squared_error``, ``mse``, ``regression_l2``, ``regression``
  // descl2 = ``rmse``, root square loss, aliases: ``root_mean_squared_error``, ``l2_root``
  // descl2 = ``quantile``, `Quantile regression <https://en.wikipedia.org/wiki/Quantile_regression>`__
  // descl2 = ``mape``, `MAPE loss <https://en.wikipedia.org/wiki/Mean_absolute_percentage_error>`__, aliases: ``mean_absolute_percentage_error``
  // descl2 = ``huber``, `Huber loss <https://en.wikipedia.org/wiki/Huber_loss>`__
  // descl2 = ``fair``, `Fair loss <https://www.kaggle.com/c/allstate-claims-severity/discussion/24520>`__
  // descl2 = ``poisson``, negative log-likelihood for `Poisson regression <https://en.wikipedia.org/wiki/Poisson_regression>`__
  // descl2 = ``gamma``, negative log-likelihood for **Gamma** regression
  // descl2 = ``gamma_deviance``, residual deviance for **Gamma** regression
  // descl2 = ``tweedie``, negative log-likelihood for **Tweedie** regression
  // descl2 = ``ndcg``, `NDCG <https://en.wikipedia.org/wiki/Discounted_cumulative_gain#Normalized_DCG>`__, aliases: ``lambdarank``, ``rank_xendcg``, ``xendcg``, ``xe_ndcg``, ``xe_ndcg_mart``, ``xendcg_mart``
  // descl2 = ``map``, `MAP <https://makarandtapaswi.wordpress.com/2012/07/02/intuition-behind-average-precision-and-map/>`__, aliases: ``mean_average_precision``
  // descl2 = ``auc``, `AUC <https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve>`__
  // descl2 = ``average_precision``, `average precision score <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.average_precision_score.html>`__
  // descl2 = ``binary_logloss``, `log loss <https://en.wikipedia.org/wiki/Cross_entropy>`__, aliases: ``binary``
  // descl2 = ``binary_error``, for one sample: ``0`` for correct classification, ``1`` for error classification
  // descl2 = ``auc_mu``, `AUC-mu <http://proceedings.mlr.press/v97/kleiman19a/kleiman19a.pdf>`__
  // descl2 = ``multi_logloss``, log loss for multi-class classification, aliases: ``multiclass``, ``softmax``, ``multiclassova``, ``multiclass_ova``, ``ova``, ``ovr``
  // descl2 = ``multi_error``, error rate for multi-class classification
  // descl2 = ``cross_entropy``, cross-entropy (with optional linear weights), aliases: ``xentropy``
  // descl2 = ``cross_entropy_lambda``, "intensity-weighted" cross-entropy, aliases: ``xentlambda``
  // descl2 = ``kullback_leibler``, `Kullback-Leibler divergence <https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence>`__, aliases: ``kldiv``
  // desc = support multiple metrics, separated by ``,``
  std::vector<std::string> metric;
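  // Illustrative example (not parsed by the docs generator): to evaluate both L2 and AUC,
  // pass a comma-separated list, e.g. ``metric = l2,auc`` in a CLI config file or parameter string.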

  // [no-save]
  // check = >0
  // alias = output_freq
  // desc = frequency for metric output
  // desc = **Note**: can be used only in CLI version
  int metric_freq = 1;

  // [no-save]
  // alias = training_metric, is_training_metric, train_metric
  // desc = set this to ``true`` to output metric result over training dataset
  // desc = **Note**: can be used only in CLI version
  bool is_provide_training_metric = false;

  // type = multi-int
  // default = 1,2,3,4,5
  // alias = ndcg_eval_at, ndcg_at, map_eval_at, map_at
  // desc = used only with ``ndcg`` and ``map`` metrics
  // desc = `NDCG <https://en.wikipedia.org/wiki/Discounted_cumulative_gain#Normalized_DCG>`__ and `MAP <https://makarandtapaswi.wordpress.com/2012/07/02/intuition-behind-average-precision-and-map/>`__ evaluation positions, separated by ``,``
  std::vector<int> eval_at;

  // check = >0
  // desc = used only with ``multi_error`` metric
  // desc = threshold for top-k multi-error metric
  // desc = the error on each sample is ``0`` if the true class is among the top ``multi_error_top_k`` predictions, and ``1`` otherwise
  // descl2 = more precisely, the error on a sample is ``0`` if there are at least ``num_classes - multi_error_top_k`` predictions strictly less than the prediction on the true class
  // desc = when ``multi_error_top_k=1`` this is equivalent to the usual multi-error metric
  int multi_error_top_k = 1;
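  // Worked example (illustration only): with 4 classes, a true class of ``2`` and raw scores
  // ``(0.1, 0.3, 0.2, 0.4)``, the true class has the 3rd highest score, so the sample counts as an
  // error for ``multi_error_top_k`` of ``1`` or ``2`` and as correct for ``multi_error_top_k >= 3``.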

  // type = multi-double
  // default = None
  // desc = used only with ``auc_mu`` metric
  // desc = list representing flattened matrix (in row-major order) giving loss weights for classification errors
  // desc = list should have ``n * n`` elements, where ``n`` is the number of classes
  // desc = the matrix co-ordinate ``[i, j]`` should correspond to the ``i * n + j``-th element of the list
  // desc = if not specified, will use equal weights for all classes
  std::vector<double> auc_mu_weights;
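  // Illustrative example (hypothetical values): for ``num_class = 3``, ``auc_mu_weights = 0,1,1,1,0,1,1,1,0``
  // encodes the 3x3 matrix with ``0`` on the diagonal and ``1`` elsewhere, i.e. every
  // misclassification is penalized equally.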

  #ifndef __NVCC__
  #pragma endregion

  #pragma region Network Parameters
  #endif  // __NVCC__

  // check = >0
  // alias = num_machine
  // desc = the number of machines for distributed learning application
  // desc = this parameter needs to be set in both **socket** and **mpi** versions
  int num_machines = 1;

  // check = >0
  // default = 12400 (random for Dask-package)
  // alias = local_port, port
  // desc = TCP listen port for local machines
  // desc = **Note**: don't forget to allow this port in firewall settings before training
  int local_listen_port = 12400;

  // check = >0
  // desc = socket time-out in minutes
  int time_out = 120;

  // alias = machine_list_file, machine_list, mlist
  // desc = path of file that lists machines for this distributed learning application
  // desc = each line contains one IP and one port for one machine. The format is ``ip port`` (space as a separator)
  // desc = **Note**: can be used only in CLI version
  std::string machine_list_filename = "";

  // alias = workers, nodes
  // desc = list of machines in the following format: ``ip1:port1,ip2:port2``
  std::string machines = "";

  #ifndef __NVCC__
  #pragma endregion

  #pragma region GPU Parameters
  #endif  // __NVCC__

  // desc = OpenCL platform ID. Usually each GPU vendor exposes one OpenCL platform
  // desc = ``-1`` means the system-wide default platform
  // desc = **Note**: refer to `GPU Targets <./GPU-Targets.rst#query-opencl-devices-in-your-system>`__ for more details
  int gpu_platform_id = -1;

  // desc = OpenCL device ID in the specified platform. Each GPU in the selected platform has a unique device ID
  // desc = ``-1`` means the default device in the selected platform
  // desc = **Note**: refer to `GPU Targets <./GPU-Targets.rst#query-opencl-devices-in-your-system>`__ for more details
  int gpu_device_id = -1;

  // desc = set this to ``true`` to use double precision math on GPU (by default single precision is used)
  // desc = **Note**: can be used only in OpenCL implementation, in CUDA implementation only double precision is currently supported
  bool gpu_use_dp = false;

  // check = >0
  // desc = number of GPUs
  // desc = **Note**: can be used only in CUDA implementation
  int num_gpu = 1;

  #ifndef __NVCC__
  #pragma endregion

  #pragma endregion
  #endif  // __NVCC__

  size_t file_load_progress_interval_bytes = size_t(10) * 1024 * 1024 * 1024;

  bool is_parallel = false;
  bool is_data_based_parallel = false;
  LIGHTGBM_EXPORT void Set(const std::unordered_map<std::string, std::string>& params);
  static const std::unordered_map<std::string, std::string>& alias_table();
  static const std::unordered_map<std::string, std::vector<std::string>>& parameter2aliases();
  static const std::unordered_set<std::string>& parameter_set();
  std::vector<std::vector<double>> auc_mu_weights_matrix;
  std::vector<std::vector<int>> interaction_constraints_vector;
  static const std::unordered_map<std::string, std::string>& ParameterTypes();
  static const std::string DumpAliases();

 private:
  void CheckParamConflict();
  void GetMembersFromString(const std::unordered_map<std::string, std::string>& params);
  std::string SaveMembersToString() const;
  void GetAucMuWeights();
  void GetInteractionConstraints();
};

inline bool Config::GetString(
  const std::unordered_map<std::string, std::string>& params,
  const std::string& name, std::string* out) {
  if (params.count(name) > 0 && !params.at(name).empty()) {
    *out = params.at(name);
    return true;
  }
  return false;
}

inline bool Config::GetInt(
  const std::unordered_map<std::string, std::string>& params,
  const std::string& name, int* out) {
  if (params.count(name) > 0 && !params.at(name).empty()) {
    if (!Common::AtoiAndCheck(params.at(name).c_str(), out)) {
      Log::Fatal("Parameter %s should be of type int, got \"%s\"",
                 name.c_str(), params.at(name).c_str());
    }
    return true;
  }
  return false;
}

inline bool Config::GetDouble(
  const std::unordered_map<std::string, std::string>& params,
  const std::string& name, double* out) {
  if (params.count(name) > 0 && !params.at(name).empty()) {
    if (!Common::AtofAndCheck(params.at(name).c_str(), out)) {
      Log::Fatal("Parameter %s should be of type double, got \"%s\"",
                 name.c_str(), params.at(name).c_str());
    }
    return true;
  }
  return false;
}

inline bool Config::GetBool(
  const std::unordered_map<std::string, std::string>& params,
  const std::string& name, bool* out) {
  if (params.count(name) > 0 && !params.at(name).empty()) {
    std::string value = params.at(name);
    std::transform(value.begin(), value.end(), value.begin(), Common::tolower);
    if (value == std::string("false") || value == std::string("-")) {
      *out = false;
    } else if (value == std::string("true") || value == std::string("+")) {
      *out = true;
    } else {
      Log::Fatal("Parameter %s should be \"true\"/\"+\" or \"false\"/\"-\", got \"%s\"",
                 name.c_str(), params.at(name).c_str());
    }
    return true;
  }
  return false;
}
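
// Usage sketch (illustration only, not part of the library): each Get* helper returns true only
// when the key is present with a non-empty value, and raises a fatal error on a malformed value.
//
//   std::unordered_map<std::string, std::string> params{{"num_class", "3"}, {"is_unbalance", "+"}};
//   int num_class = 1;
//   bool is_unbalance = false;
//   Config::GetInt(params, "num_class", &num_class);         // num_class == 3
//   Config::GetBool(params, "is_unbalance", &is_unbalance);  // "+" is accepted as true
//   Config::GetInt(params, "missing_key", &num_class);       // returns false, num_class unchanged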

inline bool Config::SortAlias(const std::string& x, const std::string& y) {
  return x.size() < y.size() || (x.size() == y.size() && x < y);
}
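
// Example (illustration only): SortAlias("max_bin", "max_bins") is true because the shorter name
// sorts first; SortAlias("mae", "mse") is true because names of equal length compare lexicographically.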

struct ParameterAlias {
  static void KeyAliasTransform(std::unordered_map<std::string, std::string>* params) {
    std::unordered_map<std::string, std::string> tmp_map;
    for (const auto& pair : *params) {
      auto alias = Config::alias_table().find(pair.first);
      if (alias != Config::alias_table().end()) {  // found alias
        auto alias_set = tmp_map.find(alias->second);
        if (alias_set != tmp_map.end()) {  // alias already set
          if (Config::SortAlias(alias_set->second, pair.first)) {
            Log::Warning("%s is set with %s=%s, %s=%s will be ignored. Current value: %s=%s",
                         alias->second.c_str(), alias_set->second.c_str(), params->at(alias_set->second).c_str(),
                         pair.first.c_str(), pair.second.c_str(), alias->second.c_str(), params->at(alias_set->second).c_str());
          } else {
            Log::Warning("%s is set with %s=%s, will be overridden by %s=%s. Current value: %s=%s",
                         alias->second.c_str(), alias_set->second.c_str(), params->at(alias_set->second).c_str(),
                         pair.first.c_str(), pair.second.c_str(), alias->second.c_str(), pair.second.c_str());
            tmp_map[alias->second] = pair.first;
          }
        } else {  // alias not set
          tmp_map.emplace(alias->second, pair.first);
        }
      } else if (Config::parameter_set().find(pair.first) == Config::parameter_set().end()) {
        Log::Warning("Unknown parameter: %s", pair.first.c_str());
      }
    }
    for (const auto& pair : tmp_map) {
      auto alias = params->find(pair.first);
      if (alias == params->end()) {  // not found
        params->emplace(pair.first, params->at(pair.second));
        params->erase(pair.second);
      } else {
        Log::Warning("%s is set=%s, %s=%s will be ignored. Current value: %s=%s",
                     pair.first.c_str(), alias->second.c_str(), pair.second.c_str(), params->at(pair.second).c_str(),
                     pair.first.c_str(), alias->second.c_str());
      }
    }
  }
};
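
// Illustrative sketch (not part of the library API): KeyAliasTransform() folds aliases into their
// canonical parameter names in place, and when several aliases of the same parameter are supplied,
// the one that SortAlias() orders first (shortest, then lexicographically smallest) wins.
// Assuming ``num_iteration`` is registered in Config::alias_table() as an alias of ``num_iterations``:
//
//   std::unordered_map<std::string, std::string> params{{"num_iteration", "100"}};
//   ParameterAlias::KeyAliasTransform(&params);
//   // params now holds {"num_iterations", "100"}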

inline std::string ParseObjectiveAlias(const std::string& type) {
  if (type == std::string("regression") || type == std::string("regression_l2")
    || type == std::string("mean_squared_error") || type == std::string("mse") || type == std::string("l2")
    || type == std::string("l2_root") || type == std::string("root_mean_squared_error") || type == std::string("rmse")) {
    return "regression";
  } else if (type == std::string("regression_l1") || type == std::string("mean_absolute_error")
    || type == std::string("l1") || type == std::string("mae")) {
    return "regression_l1";
  } else if (type == std::string("multiclass") || type == std::string("softmax")) {
    return "multiclass";
  } else if (type == std::string("multiclassova") || type == std::string("multiclass_ova") || type == std::string("ova") || type == std::string("ovr")) {
    return "multiclassova";
  } else if (type == std::string("xentropy") || type == std::string("cross_entropy")) {
    return "cross_entropy";
  } else if (type == std::string("xentlambda") || type == std::string("cross_entropy_lambda")) {
    return "cross_entropy_lambda";
  } else if (type == std::string("mean_absolute_percentage_error") || type == std::string("mape")) {
    return "mape";
  } else if (type == std::string("rank_xendcg") || type == std::string("xendcg") || type == std::string("xe_ndcg")
             || type == std::string("xe_ndcg_mart") || type == std::string("xendcg_mart")) {
    return "rank_xendcg";
  } else if (type == std::string("none") || type == std::string("null") || type == std::string("custom") || type == std::string("na")) {
    return "custom";
  }
  return type;
}

inline std::string ParseMetricAlias(const std::string& type) {
  if (type == std::string("regression") || type == std::string("regression_l2") || type == std::string("l2") || type == std::string("mean_squared_error") || type == std::string("mse")) {
    return "l2";
  } else if (type == std::string("l2_root") || type == std::string("root_mean_squared_error") || type == std::string("rmse")) {
    return "rmse";
  } else if (type == std::string("regression_l1") || type == std::string("l1") || type == std::string("mean_absolute_error") || type == std::string("mae")) {
    return "l1";
  } else if (type == std::string("binary_logloss") || type == std::string("binary")) {
    return "binary_logloss";
  } else if (type == std::string("ndcg") || type == std::string("lambdarank") || type == std::string("rank_xendcg")
             || type == std::string("xendcg") || type == std::string("xe_ndcg") || type == std::string("xe_ndcg_mart") || type == std::string("xendcg_mart")) {
    return "ndcg";
  } else if (type == std::string("map") || type == std::string("mean_average_precision")) {
    return "map";
  } else if (type == std::string("multi_logloss") || type == std::string("multiclass") || type == std::string("softmax") || type == std::string("multiclassova") || type == std::string("multiclass_ova") || type == std::string("ova") || type == std::string("ovr")) {
    return "multi_logloss";
  } else if (type == std::string("xentropy") || type == std::string("cross_entropy")) {
    return "cross_entropy";
  } else if (type == std::string("xentlambda") || type == std::string("cross_entropy_lambda")) {
    return "cross_entropy_lambda";
  } else if (type == std::string("kldiv") || type == std::string("kullback_leibler")) {
    return "kullback_leibler";
  } else if (type == std::string("mean_absolute_percentage_error") || type == std::string("mape")) {
    return "mape";
  } else if (type == std::string("none") || type == std::string("null") || type == std::string("custom") || type == std::string("na")) {
    return "custom";
  }
  return type;
}
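
// Usage sketch (illustration only): both helpers map user-facing aliases onto canonical names and
// return unrecognized strings unchanged.
//
//   ParseObjectiveAlias("mse");       // returns "regression"
//   ParseMetricAlias("mse");          // returns "l2"
//   ParseMetricAlias("my_metric");    // returns "my_metric" (unknown, passed through)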

}   // namespace LightGBM

#endif   // LIGHTGBM_CONFIG_H_