// descl2 = label should be ``int`` type, and larger number represents the higher relevance (e.g. 0:bad, 1:fair, 2:good, 3:perfect)
std::string objective = "regression";
// [doc-only]
// [no-automatically-extract]
// [no-save]
// type = enum
// alias = boosting_type, boost
// options = gbdt, rf, dart
...
@@ -160,7 +167,7 @@ struct Config {
...
@@ -160,7 +167,7 @@ struct Config {
// descl2 = **Note**: internally, LightGBM uses ``gbdt`` mode for the first ``1 / learning_rate`` iterations
std::string boosting = "gbdt";
// [doc-only]
// [no-automatically-extract]
// type = enum
// options = bagging, goss
// desc = ``bagging``, Randomly Bagging Sampling
...
@@ -200,7 +207,8 @@ struct Config {
...
@@ -200,7 +207,8 @@ struct Config {
// desc = max number of leaves in one tree
int num_leaves = kDefaultNumLeaves;
// [doc-only]
// [no-automatically-extract]
// [no-save]
// type = enum
// options = serial, feature, data, voting
// alias = tree, tree_type, tree_learner_type
...
@@ -222,7 +230,8 @@ struct Config {
...
@@ -222,7 +230,8 @@ struct Config {
// desc = **Note**: please **don't** change this during training, especially when running multiple jobs simultaneously by external packages, otherwise it may cause undesirable errors
int num_threads = 0;
// [doc-only]
// [no-automatically-extract]
// [no-save]
// type = enum
// options = cpu, gpu, cuda
// alias = device
...
@@ -235,7 +244,7 @@ struct Config {
...
@@ -235,7 +244,7 @@ struct Config {
// desc = **Note**: refer to `Installation Guide <./Installation-Guide.rst#build-gpu-version>`__ to build LightGBM with GPU support
std::string device_type = "cpu";
// [doc-only]
// [no-automatically-extract]
// alias = random_seed, random_state
// default = None
// desc = this seed is used to generate other seeds, e.g. ``data_random_seed``, ``feature_fraction_seed``, etc.
...
@@ -593,7 +602,6 @@ struct Config {
...
@@ -593,7 +602,6 @@ struct Config {
// desc = **Note**: can be used only in CLI version
int snapshot_freq = -1;
// [no-save]
// desc = whether to use gradient quantization when training
// desc = enabling this will discretize (quantize) the gradients and hessians into bins of ``num_grad_quant_bins``
// desc = with quantized training, most arithmetics in the training process will be integer operations
...
@@ -602,21 +610,18 @@ struct Config {
...
@@ -602,21 +610,18 @@ struct Config {
// desc = *New in version 4.0.0*
bool use_quantized_grad = false;
// [no-save]
// desc = number of bins to quantization gradients and hessians
// desc = with more bins, the quantized training will be closer to full precision training
// desc = **Note**: can be used only with ``device_type = cpu``
// desc = *New in 4.0.0*
int num_grad_quant_bins = 4;
// [no-save]
// desc = whether to renew the leaf values with original gradients when quantized training
// desc = renewing is very helpful for good quantized training accuracy for ranking objectives
// desc = **Note**: can be used only with ``device_type = cpu``
// desc = *New in 4.0.0*
bool quant_train_renew_leaf = false;
// [no-save]
// desc = whether to use stochastic rounding in gradient quantization