"tests/vscode:/vscode.git/clone" did not exist on "5c8a331bf5966f1df546f154f64ed9b8856a90ee"
Unverified Commit 77d92b7c authored by Guolin Ke's avatar Guolin Ke Committed by GitHub
Browse files

speed up `FindBestThresholdFromHistogram` (#2867)

* speed up for const hessian

* rename template

* some refactorings

* refine

* refine

* simplify codes

* fix random in feature histogram

* code refine

* refine

* try fix

* make gcc happy

* remove timer

* rollback some changes

* more templates

* fix a bug

* reduce the cost of timer

* fix gpu

* fix bug

* fix gpu
parent 7776cfea
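The core optimization in this commit: the histogram scan in FindBestThresholdSequence used to test configuration flags (L1 regularization, max_delta_step, monotone constraints, extra-trees randomness) inside its per-bin loop, and the "speed up for const hessian" bullet indicates constant-hessian training also gets its own path. The commit lifts those checks into bool template parameters resolved once per feature, so each configuration compiles to its own branch-free loop. A minimal sketch of the pattern under assumed names (ScanBins and Bind are illustrative, not LightGBM code):

#include <functional>

struct Config {
  bool use_l1;
  bool use_max_output;
};

template <bool USE_L1, bool USE_MAX_OUTPUT>
double ScanBins(const double* grad, const double* hess, int num_bin) {
  double best = 0.0;
  for (int i = 0; i < num_bin; ++i) {
    double g = grad[i];
    if (USE_L1) {                     // compile-time constant: branch folds away
      g = g > 0 ? g - 1.0 : g + 1.0;  // stand-in for L1 soft-thresholding
    }
    double gain = g * g / (hess[i] + 1e-15);
    if (USE_MAX_OUTPUT) {                    // likewise folded at compile time
      gain = gain < 100.0 ? gain : 100.0;    // stand-in clamp
    }
    if (gain > best) best = gain;
  }
  return best;
}

// Resolved once per feature, not once per bin.
std::function<double(const double*, const double*, int)> Bind(const Config& c) {
  if (c.use_l1) {
    return c.use_max_output ? &ScanBins<true, true> : &ScanBins<true, false>;
  }
  return c.use_max_output ? &ScanBins<false, true> : &ScanBins<false, false>;
}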
......@@ -11,7 +11,7 @@
#include <string>
#include <vector>
#include <LightGBM/json11.hpp>
#include <LightGBM/utils/json11.h>
namespace LightGBM {
......@@ -48,6 +48,8 @@ class TreeLearner {
*/
virtual void ResetConfig(const Config* config) = 0;
virtual void SetForcedSplit(const Json* forced_split_json) = 0;
/*!
* \brief training tree model on dataset
* \param gradients The first order gradients
......@@ -55,8 +57,7 @@ class TreeLearner {
* \param is_constant_hessian True if all hessians share the same value
* \return A trained tree
*/
virtual Tree* Train(const score_t* gradients, const score_t* hessians,
const Json& forced_split_json) = 0;
virtual Tree* Train(const score_t* gradients, const score_t* hessians) = 0;
/*!
* \brief use an existing tree to fit the new gradients and hessians.
......
......@@ -1089,11 +1089,10 @@ class Timer {
// Note: this class is not thread-safe, don't use it inside omp blocks
class FunctionTimer {
public:
#ifdef TIMETAG
FunctionTimer(const std::string& name, Timer& timer) : timer_(timer) {
timer.Start(name);
#ifdef TIMETAG
name_ = name;
#endif // TIMETAG
}
~FunctionTimer() { timer_.Stop(name_); }
......@@ -1101,6 +1100,9 @@ class FunctionTimer {
private:
std::string name_;
Timer& timer_;
#else
FunctionTimer(const std::string&, Timer&) {}
#endif // TIMETAG
};
} // namespace Common
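// Editor's note: the hunk above moves "#ifdef TIMETAG" so that, without the
// macro, FunctionTimer has an empty constructor and no members at all; call
// sites keep a single line and the object compiles away ("reduce the cost of
// timer"). A self-contained sketch of the same RAII pattern, with the
// hypothetical name ScopedTimer (not LightGBM's class):
#include <chrono>
#include <cstdio>
#include <string>

class ScopedTimer {
 public:
#ifdef TIMETAG
  explicit ScopedTimer(const std::string& name)
      : name_(name), start_(std::chrono::steady_clock::now()) {}
  ~ScopedTimer() {
    auto us = std::chrono::duration_cast<std::chrono::microseconds>(
                  std::chrono::steady_clock::now() - start_).count();
    std::printf("%s: %lld us\n", name_.c_str(), static_cast<long long>(us));
  }

 private:
  std::string name_;
  std::chrono::steady_clock::time_point start_;
#else
  explicit ScopedTimer(const std::string&) {}  // instrumentation off: no-op
#endif
};

void TrainOneIter() {
  ScopedTimer timer("TrainOneIter");  // free unless built with -DTIMETAG
  // ... actual work ...
}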
......
......@@ -58,14 +58,13 @@ void GBDT::Init(const Config* config, const Dataset* train_data, const Objective
es_first_metric_only_ = config_->first_metric_only;
shrinkage_rate_ = config_->learning_rate;
std::string forced_splits_path = config->forcedsplits_filename;
// load forced_splits file
if (forced_splits_path != "") {
std::ifstream forced_splits_file(forced_splits_path.c_str());
std::stringstream buffer;
buffer << forced_splits_file.rdbuf();
std::string err;
forced_splits_json_ = Json::parse(buffer.str(), err);
if (!config->forcedsplits_filename.empty()) {
std::ifstream forced_splits_file(config->forcedsplits_filename.c_str());
std::stringstream buffer;
buffer << forced_splits_file.rdbuf();
std::string err;
forced_splits_json_ = Json::parse(buffer.str(), err);
}
objective_function_ = objective_function;
......@@ -81,6 +80,7 @@ void GBDT::Init(const Config* config, const Dataset* train_data, const Objective
// init tree learner
tree_learner_->Init(train_data_, is_constant_hessian_);
tree_learner_->SetForcedSplit(&forced_splits_json_);
// push training metrics
training_metrics_.clear();
......@@ -366,7 +366,7 @@ bool GBDT::TrainOneIter(const score_t* gradients, const score_t* hessians) {
grad = gradients_.data() + offset;
hess = hessians_.data() + offset;
}
new_tree.reset(tree_learner_->Train(grad, hess, forced_splits_json_));
new_tree.reset(tree_learner_->Train(grad, hess));
}
if (new_tree->num_leaves() > 1) {
......@@ -717,6 +717,21 @@ void GBDT::ResetConfig(const Config* config) {
if (train_data_ != nullptr) {
ResetBaggingConfig(new_config.get(), false);
}
if (config_->forcedsplits_filename != new_config->forcedsplits_filename) {
// load forced_splits file
if (!new_config->forcedsplits_filename.empty()) {
std::ifstream forced_splits_file(
new_config->forcedsplits_filename.c_str());
std::stringstream buffer;
buffer << forced_splits_file.rdbuf();
std::string err;
forced_splits_json_ = Json::parse(buffer.str(), err);
tree_learner_->SetForcedSplit(&forced_splits_json_);
} else {
forced_splits_json_ = Json();
tree_learner_->SetForcedSplit(nullptr);
}
}
config_.reset(new_config.release());
}
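// Editor's note: Init() and ResetConfig() above now share the same
// forced-splits loading logic, and ResetConfig() additionally clears the
// learner's forced splits when the filename is removed. A hedged sketch of
// that loading path using the vendored json11 API (the helper name
// LoadForcedSplits and the parse-error fallback are assumptions, not
// LightGBM code):
#include <fstream>
#include <sstream>
#include <string>

#include <LightGBM/utils/json11.h>

using json11::Json;

Json LoadForcedSplits(const std::string& path) {
  if (path.empty()) return Json();       // no file configured: empty JSON
  std::ifstream forced_splits_file(path.c_str());
  std::stringstream buffer;
  buffer << forced_splits_file.rdbuf();  // slurp the whole file
  std::string err;
  Json parsed = Json::parse(buffer.str(), err);
  return err.empty() ? parsed : Json();  // fall back to empty on bad JSON
}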
......
......@@ -21,7 +21,7 @@
#include <utility>
#include <vector>
#include <LightGBM/json11.hpp>
#include <LightGBM/utils/json11.h>
#include "score_updater.hpp"
namespace LightGBM {
......
......@@ -125,7 +125,7 @@ class RF : public GBDT {
hess = tmp_hess_.data();
}
new_tree.reset(tree_learner_->Train(grad, hess, forced_splits_json_));
new_tree.reset(tree_learner_->Train(grad, hess));
}
if (new_tree->num_leaves() > 1) {
......
......@@ -295,7 +295,6 @@ class Booster {
if (param.count("metric")) {
Log::Fatal("Cannot change metric during training");
}
CheckDatasetResetConfig(config_, param);
config_.Set(param);
......
......@@ -293,6 +293,12 @@ void Config::CheckParamConflict() {
histogram_pool_size = -1;
}
}
if (is_data_based_parallel) {
if (!forcedsplits_filename.empty()) {
Log::Fatal("Don't support forcedsplits in %s tree learner",
tree_learner.c_str());
}
}
// Check max_depth and num_leaves
if (max_depth > 0) {
double full_num_leaves = std::pow(2, max_depth);
......
......@@ -11,7 +11,7 @@
#include <fstream>
#include <LightGBM/json11.hpp>
#include <LightGBM/utils/json11.h>
namespace LightGBM {
......
......@@ -18,7 +18,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <LightGBM/json11.hpp>
#include <LightGBM/utils/json11.h>
#include <limits>
#include <cassert>
......
/*!
* Copyright (c) 2020 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for
* license information.
*/
#ifndef LIGHTGBM_TREELEARNER_COL_SAMPLER_HPP_
#define LIGHTGBM_TREELEARNER_COL_SAMPLER_HPP_
#include <LightGBM/dataset.h>
#include <LightGBM/meta.h>
#include <LightGBM/utils/common.h>
#include <LightGBM/utils/openmp_wrapper.h>
#include <LightGBM/utils/random.h>
namespace LightGBM {
class ColSampler {
public:
ColSampler(const Config* config)
: fraction_bytree_(config->feature_fraction),
fraction_bynode_(config->feature_fraction_bynode),
seed_(config->feature_fraction_seed),
random_(config->feature_fraction_seed) {
}
static int GetCnt(size_t total_cnt, double fraction) {
const int min = std::min(2, static_cast<int>(total_cnt));
int used_feature_cnt = static_cast<int>(Common::RoundInt(total_cnt * fraction));
return std::max(used_feature_cnt, min);
}
void SetTrainingData(const Dataset* train_data) {
train_data_ = train_data;
is_feature_used_.resize(train_data_->num_features(), 1);
valid_feature_indices_ = train_data->ValidFeatureIndices();
if (fraction_bytree_ >= 1.0f) {
need_reset_bytree_ = false;
used_cnt_bytree_ = static_cast<int>(valid_feature_indices_.size());
} else {
need_reset_bytree_ = true;
used_cnt_bytree_ =
GetCnt(valid_feature_indices_.size(), fraction_bytree_);
}
ResetByTree();
}
void SetConfig(const Config* config) {
fraction_bytree_ = config->feature_fraction;
fraction_bynode_ = config->feature_fraction_bynode;
is_feature_used_.resize(train_data_->num_features(), 1);
// seed is changed
if (seed_ != config->feature_fraction_seed) {
seed_ = config->feature_fraction_seed;
random_ = Random(seed_);
}
if (fraction_bytree_ >= 1.0f) {
need_reset_bytree_ = false;
used_cnt_bytree_ = static_cast<int>(valid_feature_indices_.size());
} else {
need_reset_bytree_ = true;
used_cnt_bytree_ =
GetCnt(valid_feature_indices_.size(), fraction_bytree_);
}
ResetByTree();
}
void ResetByTree() {
if (need_reset_bytree_) {
std::memset(is_feature_used_.data(), 0,
sizeof(int8_t) * is_feature_used_.size());
used_feature_indices_ = random_.Sample(
static_cast<int>(valid_feature_indices_.size()), used_cnt_bytree_);
int omp_loop_size = static_cast<int>(used_feature_indices_.size());
#pragma omp parallel for schedule(static, 512) if (omp_loop_size >= 1024)
for (int i = 0; i < omp_loop_size; ++i) {
int used_feature = valid_feature_indices_[used_feature_indices_[i]];
int inner_feature_index = train_data_->InnerFeatureIndex(used_feature);
is_feature_used_[inner_feature_index] = 1;
}
}
}
std::vector<int8_t> GetByNode() {
if (fraction_bynode_ >= 1.0f) {
return std::vector<int8_t>(train_data_->num_features(), 1);
}
std::vector<int8_t> ret(train_data_->num_features(), 0);
if (need_reset_bytree_) {
auto used_feature_cnt = GetCnt(used_feature_indices_.size(), fraction_bynode_);
auto sampled_indices = random_.Sample(
static_cast<int>(used_feature_indices_.size()), used_feature_cnt);
int omp_loop_size = static_cast<int>(sampled_indices.size());
#pragma omp parallel for schedule(static, 512) if (omp_loop_size >= 1024)
for (int i = 0; i < omp_loop_size; ++i) {
int used_feature =
valid_feature_indices_[used_feature_indices_[sampled_indices[i]]];
int inner_feature_index = train_data_->InnerFeatureIndex(used_feature);
ret[inner_feature_index] = 1;
}
} else {
auto used_feature_cnt =
GetCnt(valid_feature_indices_.size(), fraction_bynode_);
auto sampled_indices = random_.Sample(
static_cast<int>(valid_feature_indices_.size()), used_feature_cnt);
int omp_loop_size = static_cast<int>(sampled_indices.size());
#pragma omp parallel for schedule(static, 512) if (omp_loop_size >= 1024)
for (int i = 0; i < omp_loop_size; ++i) {
int used_feature = valid_feature_indices_[sampled_indices[i]];
int inner_feature_index = train_data_->InnerFeatureIndex(used_feature);
ret[inner_feature_index] = 1;
}
}
return ret;
}
const std::vector<int8_t>& is_feature_used_bytree() const {
return is_feature_used_;
}
void SetIsFeatureUsedByTree(int fid, bool val) {
is_feature_used_[fid] = val;
}
private:
const Dataset* train_data_;
double fraction_bytree_;
double fraction_bynode_;
bool need_reset_bytree_;
int used_cnt_bytree_;
int seed_;
Random random_;
std::vector<int8_t> is_feature_used_;
std::vector<int> used_feature_indices_;
std::vector<int> valid_feature_indices_;
};
} // namespace LightGBM
#endif // LIGHTGBM_TREELEARNER_COL_SAMPLER_HPP_
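// Editor's note: the new ColSampler centralizes what used to live in the tree
// learners. GetCnt() turns a feature_fraction into a count (rounded, but
// never below 2 when at least 2 features exist), ResetByTree() redraws the
// per-tree subset, GetByNode() redraws per leaf, and the OpenMP pragmas only
// parallelize when the loop is long enough to pay for thread startup
// (schedule(static, 512) if (omp_loop_size >= 1024)). A standalone check of
// GetCnt's contract, illustrative only (std::lround stands in for
// Common::RoundInt):
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstddef>

int GetCnt(std::size_t total_cnt, double fraction) {
  const int min = std::min(2, static_cast<int>(total_cnt));
  int used = static_cast<int>(std::lround(total_cnt * fraction));
  return std::max(used, min);
}

int main() {
  assert(GetCnt(100, 0.3) == 30);   // plain rounding
  assert(GetCnt(100, 0.001) == 2);  // floor of 2 kicks in
  assert(GetCnt(1, 0.5) == 1);      // only one feature exists
  return 0;
}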
......@@ -57,7 +57,7 @@ void DataParallelTreeLearner<TREELEARNER_T>::BeforeTrain() {
for (int i = 0; i < this->train_data_->num_total_features(); ++i) {
int inner_feature_index = this->train_data_->InnerFeatureIndex(i);
if (inner_feature_index == -1) { continue; }
if (this->is_feature_used_[inner_feature_index]) {
if (this->col_sampler_.is_feature_used_bytree()[inner_feature_index]) {
int cur_min_machine = static_cast<int>(ArrayArgs<int>::ArgMin(num_bins_distributed));
feature_distribution[cur_min_machine].push_back(inner_feature_index);
auto num_bin = this->train_data_->FeatureNumBin(inner_feature_index);
......@@ -147,11 +147,13 @@ void DataParallelTreeLearner<TREELEARNER_T>::BeforeTrain() {
template <typename TREELEARNER_T>
void DataParallelTreeLearner<TREELEARNER_T>::FindBestSplits() {
TREELEARNER_T::ConstructHistograms(this->is_feature_used_, true);
TREELEARNER_T::ConstructHistograms(
this->col_sampler_.is_feature_used_bytree(), true);
// construct local histograms
#pragma omp parallel for schedule(static)
for (int feature_index = 0; feature_index < this->num_features_; ++feature_index) {
if ((!this->is_feature_used_.empty() && this->is_feature_used_[feature_index] == false)) continue;
if (this->col_sampler_.is_feature_used_bytree()[feature_index] == false)
continue;
// copy to buffer
std::memcpy(input_buffer_.data() + buffer_write_start_pos_[feature_index],
this->smaller_leaf_histogram_array_[feature_index].RawData(),
......@@ -160,19 +162,18 @@ void DataParallelTreeLearner<TREELEARNER_T>::FindBestSplits() {
// Reduce scatter for histogram
Network::ReduceScatter(input_buffer_.data(), reduce_scatter_size_, sizeof(hist_t), block_start_.data(),
block_len_.data(), output_buffer_.data(), static_cast<comm_size_t>(output_buffer_.size()), &HistogramSumReducer);
this->FindBestSplitsFromHistograms(this->is_feature_used_, true);
this->FindBestSplitsFromHistograms(
this->col_sampler_.is_feature_used_bytree(), true);
}
template <typename TREELEARNER_T>
void DataParallelTreeLearner<TREELEARNER_T>::FindBestSplitsFromHistograms(const std::vector<int8_t>&, bool) {
std::vector<SplitInfo> smaller_bests_per_thread(this->share_state_->num_threads);
std::vector<SplitInfo> larger_bests_per_thread(this->share_state_->num_threads);
std::vector<int8_t> smaller_node_used_features(this->num_features_, 1);
std::vector<int8_t> larger_node_used_features(this->num_features_, 1);
if (this->config_->feature_fraction_bynode < 1.0f) {
smaller_node_used_features = this->GetUsedFeatures(false);
larger_node_used_features = this->GetUsedFeatures(false);
}
std::vector<int8_t> smaller_node_used_features =
this->col_sampler_.GetByNode();
std::vector<int8_t> larger_node_used_features =
this->col_sampler_.GetByNode();
OMP_INIT_EX();
#pragma omp parallel for schedule(static)
for (int feature_index = 0; feature_index < this->num_features_; ++feature_index) {
......@@ -241,7 +242,7 @@ void DataParallelTreeLearner<TREELEARNER_T>::FindBestSplitsFromHistograms(const
template <typename TREELEARNER_T>
void DataParallelTreeLearner<TREELEARNER_T>::Split(Tree* tree, int best_Leaf, int* left_leaf, int* right_leaf) {
this->SplitInner(tree, best_Leaf, left_leaf, right_leaf, false);
TREELEARNER_T::SplitInner(tree, best_Leaf, left_leaf, right_leaf, false);
const SplitInfo& best_split_info = this->best_split_per_leaf_[best_Leaf];
// need update global number of data in leaf
global_data_count_in_leaf_[*left_leaf] = best_split_info.left_count;
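// Editor's note: the qualified call TREELEARNER_T::SplitInner(...) replaces
// this->SplitInner(...). A qualified member call is bound statically, so it
// cannot re-enter an override and skips virtual dispatch in this template.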
......
/*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
* Licensed under the MIT License. See LICENSE file in the project root for
* license information.
*/
#ifndef LIGHTGBM_TREELEARNER_FEATURE_HISTOGRAM_HPP_
#define LIGHTGBM_TREELEARNER_FEATURE_HISTOGRAM_HPP_
......@@ -32,18 +33,19 @@ class FeatureMetainfo {
/*! \brief pointer of tree config */
const Config* config;
BinType bin_type;
/*! \brief random number generator for extremely randomized trees */
mutable Random rand;
};
/*!
* \brief FeatureHistogram is used to construct and store a histogram for a feature.
*/
* \brief FeatureHistogram is used to construct and store a histogram for a
* feature.
*/
class FeatureHistogram {
public:
FeatureHistogram() {
data_ = nullptr;
}
FeatureHistogram() { data_ = nullptr; }
~FeatureHistogram() {
}
~FeatureHistogram() {}
/*! \brief Disable copy */
FeatureHistogram& operator=(const FeatureHistogram&) = delete;
......@@ -51,111 +53,228 @@ class FeatureHistogram {
FeatureHistogram(const FeatureHistogram&) = delete;
/*!
* \brief Init the feature histogram
* \param feature the feature data for this histogram
* \param min_num_data_one_leaf minimal number of data in one leaf
*/
* \brief Init the feature histogram
* \param feature the feature data for this histogram
* \param min_num_data_one_leaf minimal number of data in one leaf
*/
void Init(hist_t* data, const FeatureMetainfo* meta) {
meta_ = meta;
data_ = data;
if (meta_->bin_type == BinType::NumericalBin) {
find_best_threshold_fun_ = std::bind(&FeatureHistogram::FindBestThresholdNumerical, this, std::placeholders::_1,
std::placeholders::_2, std::placeholders::_3, std::placeholders::_4, std::placeholders::_5);
FuncForNumrical();
} else {
find_best_threshold_fun_ = std::bind(&FeatureHistogram::FindBestThresholdCategorical, this, std::placeholders::_1,
std::placeholders::_2, std::placeholders::_3, std::placeholders::_4, std::placeholders::_5);
FuncForCategorical();
}
rand_ = Random(meta_->config->extra_seed);
}
hist_t* RawData() {
return data_;
}
hist_t* RawData() { return data_; }
/*!
* \brief Subtract current histograms with other
* \param other The histogram that want to subtract
*/
* \brief Subtract current histograms with other
* \param other The histogram that want to subtract
*/
void Subtract(const FeatureHistogram& other) {
for (int i = 0; i < (meta_->num_bin - meta_->offset) * 2; ++i) {
data_[i] -= other.data_[i];
}
}
void FindBestThreshold(double sum_gradient, double sum_hessian, data_size_t num_data,
const ConstraintEntry& constraints, SplitInfo* output) {
void FindBestThreshold(double sum_gradient, double sum_hessian,
data_size_t num_data,
const ConstraintEntry& constraints,
SplitInfo* output) {
output->default_left = true;
output->gain = kMinScore;
find_best_threshold_fun_(sum_gradient, sum_hessian + 2 * kEpsilon, num_data, constraints, output);
find_best_threshold_fun_(sum_gradient, sum_hessian + 2 * kEpsilon, num_data,
constraints, output);
output->gain *= meta_->penalty;
}
void FindBestThresholdNumerical(double sum_gradient, double sum_hessian, data_size_t num_data,
const ConstraintEntry& constraints, SplitInfo* output) {
template <bool USE_RAND, bool USE_L1, bool USE_MAX_OUTPUT>
double BeforeNumercal(double sum_gradient, double sum_hessian,
SplitInfo* output, int* rand_threshold) {
is_splittable_ = false;
double gain_shift = GetLeafSplitGain(sum_gradient, sum_hessian,
meta_->config->lambda_l1, meta_->config->lambda_l2, meta_->config->max_delta_step);
double min_gain_shift = gain_shift + meta_->config->min_gain_to_split;
int rand_threshold = 0;
if (meta_->num_bin - 2 > 0) {
rand_threshold = rand_.NextInt(0, meta_->num_bin - 2);
output->monotone_type = meta_->monotone_type;
double gain_shift = GetLeafGain<USE_L1, USE_MAX_OUTPUT>(
sum_gradient, sum_hessian, meta_->config->lambda_l1,
meta_->config->lambda_l2, meta_->config->max_delta_step);
*rand_threshold = 0;
if (USE_RAND) {
if (meta_->num_bin - 2 > 0) {
*rand_threshold = meta_->rand.NextInt(0, meta_->num_bin - 2);
}
}
const bool is_rand = meta_->config->extra_trees;
return gain_shift + meta_->config->min_gain_to_split;
}
void FuncForNumrical() {
if (meta_->config->extra_trees) {
if (meta_->config->monotone_constraints.empty()) {
FuncForNumricalL1<true, false>();
} else {
FuncForNumricalL1<true, true>();
}
} else {
if (meta_->config->monotone_constraints.empty()) {
FuncForNumricalL1<false, false>();
} else {
FuncForNumricalL1<false, true>();
}
}
}
template <bool USE_RAND, bool USE_MC>
void FuncForNumricalL1() {
if (meta_->config->lambda_l1 > 0) {
if (meta_->config->max_delta_step > 0) {
FuncForNumricalL2<USE_RAND, USE_MC, true, true>();
} else {
FuncForNumricalL2<USE_RAND, USE_MC, true, false>();
}
} else {
if (meta_->config->max_delta_step > 0) {
FuncForNumricalL2<USE_RAND, USE_MC, false, true>();
} else {
FuncForNumricalL2<USE_RAND, USE_MC, false, false>();
}
}
}
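// Editor's note: FuncForNumrical -> FuncForNumricalL1 -> FuncForNumricalL2
// is a dispatch ladder: each level reads one config flag at runtime and
// forwards it as a compile-time bool, so all 2^4 combinations of
// USE_RAND / USE_MC / USE_L1 / USE_MAX_OUTPUT get their own fully
// specialized scan. The branching cost is paid once per feature in Init()
// instead of once per bin inside FindBestThresholdSequence.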
template <bool USE_RAND, bool USE_MC, bool USE_L1, bool USE_MAX_OUTPUT>
void FuncForNumricalL2() {
if (meta_->num_bin > 2 && meta_->missing_type != MissingType::None) {
if (meta_->missing_type == MissingType::Zero) {
if (is_rand) {
FindBestThresholdSequence<true>(sum_gradient, sum_hessian, num_data, constraints, min_gain_shift, output, -1, true, false, rand_threshold);
FindBestThresholdSequence<true>(sum_gradient, sum_hessian, num_data, constraints, min_gain_shift, output, 1, true, false, rand_threshold);
} else {
FindBestThresholdSequence<false>(sum_gradient, sum_hessian, num_data, constraints, min_gain_shift, output, -1, true, false, rand_threshold);
FindBestThresholdSequence<false>(sum_gradient, sum_hessian, num_data, constraints, min_gain_shift, output, 1, true, false, rand_threshold);
}
find_best_threshold_fun_ =
[=](double sum_gradient, double sum_hessian, data_size_t num_data,
const ConstraintEntry& constraints, SplitInfo* output) {
int rand_threshold = 0;
double min_gain_shift =
BeforeNumercal<USE_RAND, USE_L1, USE_MAX_OUTPUT>(
sum_gradient, sum_hessian, output, &rand_threshold);
FindBestThresholdSequence<USE_RAND, USE_MC, USE_L1,
USE_MAX_OUTPUT, true, true, false>(
sum_gradient, sum_hessian, num_data, constraints,
min_gain_shift, output, rand_threshold);
FindBestThresholdSequence<USE_RAND, USE_MC, USE_L1,
USE_MAX_OUTPUT, false, true, false>(
sum_gradient, sum_hessian, num_data, constraints,
min_gain_shift, output, rand_threshold);
};
} else {
if (is_rand) {
FindBestThresholdSequence<true>(sum_gradient, sum_hessian, num_data, constraints, min_gain_shift, output, -1, false, true, rand_threshold);
FindBestThresholdSequence<true>(sum_gradient, sum_hessian, num_data, constraints, min_gain_shift, output, 1, false, true, rand_threshold);
} else {
FindBestThresholdSequence<false>(sum_gradient, sum_hessian, num_data, constraints, min_gain_shift, output, -1, false, true, rand_threshold);
FindBestThresholdSequence<false>(sum_gradient, sum_hessian, num_data, constraints, min_gain_shift, output, 1, false, true, rand_threshold);
}
find_best_threshold_fun_ =
[=](double sum_gradient, double sum_hessian, data_size_t num_data,
const ConstraintEntry& constraints, SplitInfo* output) {
int rand_threshold = 0;
double min_gain_shift =
BeforeNumercal<USE_RAND, USE_L1, USE_MAX_OUTPUT>(
sum_gradient, sum_hessian, output, &rand_threshold);
FindBestThresholdSequence<USE_RAND, USE_MC, USE_L1,
USE_MAX_OUTPUT, true, false, true>(
sum_gradient, sum_hessian, num_data, constraints,
min_gain_shift, output, rand_threshold);
FindBestThresholdSequence<USE_RAND, USE_MC, USE_L1,
USE_MAX_OUTPUT, false, false, true>(
sum_gradient, sum_hessian, num_data, constraints,
min_gain_shift, output, rand_threshold);
};
}
} else {
if (is_rand) {
FindBestThresholdSequence<true>(sum_gradient, sum_hessian, num_data, constraints, min_gain_shift, output, -1, false, false, rand_threshold);
if (meta_->missing_type != MissingType::NaN) {
find_best_threshold_fun_ =
[=](double sum_gradient, double sum_hessian, data_size_t num_data,
const ConstraintEntry& constraints, SplitInfo* output) {
int rand_threshold = 0;
double min_gain_shift =
BeforeNumercal<USE_RAND, USE_L1, USE_MAX_OUTPUT>(
sum_gradient, sum_hessian, output, &rand_threshold);
FindBestThresholdSequence<USE_RAND, USE_MC, USE_L1,
USE_MAX_OUTPUT, true, false, false>(
sum_gradient, sum_hessian, num_data, constraints,
min_gain_shift, output, rand_threshold);
};
} else {
FindBestThresholdSequence<false>(sum_gradient, sum_hessian, num_data, constraints, min_gain_shift, output, -1, false, false, rand_threshold);
}
// fix the direction error when only have 2 bins
if (meta_->missing_type == MissingType::NaN) {
output->default_left = false;
find_best_threshold_fun_ =
[=](double sum_gradient, double sum_hessian, data_size_t num_data,
const ConstraintEntry& constraints, SplitInfo* output) {
int rand_threshold = 0;
double min_gain_shift =
BeforeNumercal<USE_RAND, USE_L1, USE_MAX_OUTPUT>(
sum_gradient, sum_hessian, output, &rand_threshold);
FindBestThresholdSequence<USE_RAND, USE_MC, USE_L1,
USE_MAX_OUTPUT, true, false, false>(
sum_gradient, sum_hessian, num_data, constraints,
min_gain_shift, output, rand_threshold);
output->default_left = false;
};
}
}
output->gain -= min_gain_shift;
output->monotone_type = meta_->monotone_type;
}
void FindBestThresholdCategorical(double sum_gradient, double sum_hessian,
data_size_t num_data,
const ConstraintEntry& constraints,
SplitInfo* output) {
void FuncForCategorical() {
if (meta_->config->extra_trees) {
FindBestThresholdCategoricalInner<true>(sum_gradient, sum_hessian,
num_data, constraints, output);
if (meta_->config->monotone_constraints.empty()) {
FuncForCategoricalL1<true, false>();
} else {
FuncForCategoricalL1<true, true>();
}
} else {
if (meta_->config->monotone_constraints.empty()) {
FuncForCategoricalL1<false, false>();
} else {
FuncForCategoricalL1<false, true>();
}
}
}
template <bool USE_RAND, bool USE_MC>
void FuncForCategoricalL1() {
if (meta_->config->lambda_l1 > 0) {
if (meta_->config->max_delta_step > 0) {
find_best_threshold_fun_ =
std::bind(&FeatureHistogram::FindBestThresholdCategoricalInner<
USE_RAND, USE_MC, true, true>,
this, std::placeholders::_1, std::placeholders::_2,
std::placeholders::_3, std::placeholders::_4,
std::placeholders::_5);
} else {
find_best_threshold_fun_ =
std::bind(&FeatureHistogram::FindBestThresholdCategoricalInner<
USE_RAND, USE_MC, true, false>,
this, std::placeholders::_1, std::placeholders::_2,
std::placeholders::_3, std::placeholders::_4,
std::placeholders::_5);
}
} else {
FindBestThresholdCategoricalInner<false>(sum_gradient, sum_hessian,
num_data, constraints, output);
if (meta_->config->max_delta_step > 0) {
find_best_threshold_fun_ =
std::bind(&FeatureHistogram::FindBestThresholdCategoricalInner<
USE_RAND, USE_MC, false, true>,
this, std::placeholders::_1, std::placeholders::_2,
std::placeholders::_3, std::placeholders::_4,
std::placeholders::_5);
} else {
find_best_threshold_fun_ =
std::bind(&FeatureHistogram::FindBestThresholdCategoricalInner<
USE_RAND, USE_MC, false, false>,
this, std::placeholders::_1, std::placeholders::_2,
std::placeholders::_3, std::placeholders::_4,
std::placeholders::_5);
}
}
}
template<bool IS_RAND>
void FindBestThresholdCategoricalInner(double sum_gradient, double sum_hessian, data_size_t num_data,
const ConstraintEntry& constraints, SplitInfo* output) {
template <bool USE_RAND, bool USE_MC, bool USE_L1, bool USE_MAX_OUTPUT>
void FindBestThresholdCategoricalInner(double sum_gradient,
double sum_hessian,
data_size_t num_data,
const ConstraintEntry& constraints,
SplitInfo* output) {
is_splittable_ = false;
output->default_left = false;
double best_gain = kMinScore;
data_size_t best_left_count = 0;
double best_sum_left_gradient = 0;
double best_sum_left_hessian = 0;
double gain_shift = GetLeafSplitGain(sum_gradient, sum_hessian,
meta_->config->lambda_l1, meta_->config->lambda_l2, meta_->config->max_delta_step);
double gain_shift = GetLeafGain<USE_L1, USE_MAX_OUTPUT>(
sum_gradient, sum_hessian, meta_->config->lambda_l1,
meta_->config->lambda_l2, meta_->config->max_delta_step);
double min_gain_shift = gain_shift + meta_->config->min_gain_to_split;
bool is_full_categorical = meta_->missing_type == MissingType::None;
......@@ -169,35 +288,40 @@ class FeatureHistogram {
const double cnt_factor = num_data / sum_hessian;
int rand_threshold = 0;
if (use_onehot) {
if (IS_RAND) {
if (USE_RAND) {
if (used_bin > 0) {
rand_threshold = rand_.NextInt(0, used_bin);
rand_threshold = meta_->rand.NextInt(0, used_bin);
}
}
for (int t = 0; t < used_bin; ++t) {
const auto grad = GET_GRAD(data_, t);
const auto hess = GET_HESS(data_, t);
data_size_t cnt = static_cast<data_size_t>(Common::RoundInt(hess * cnt_factor));
data_size_t cnt =
static_cast<data_size_t>(Common::RoundInt(hess * cnt_factor));
// if data not enough, or sum hessian too small
if (cnt < meta_->config->min_data_in_leaf
|| hess < meta_->config->min_sum_hessian_in_leaf) continue;
if (cnt < meta_->config->min_data_in_leaf ||
hess < meta_->config->min_sum_hessian_in_leaf)
continue;
data_size_t other_count = num_data - cnt;
// if data not enough
if (other_count < meta_->config->min_data_in_leaf) continue;
double sum_other_hessian = sum_hessian - hess - kEpsilon;
// if sum hessian too small
if (sum_other_hessian < meta_->config->min_sum_hessian_in_leaf) continue;
if (sum_other_hessian < meta_->config->min_sum_hessian_in_leaf)
continue;
double sum_other_gradient = sum_gradient - grad;
if (IS_RAND) {
if (USE_RAND) {
if (t != rand_threshold) {
continue;
}
}
// current split gain
double current_gain = GetSplitGains(sum_other_gradient, sum_other_hessian, grad, hess + kEpsilon,
meta_->config->lambda_l1, l2, meta_->config->max_delta_step, constraints, 0);
double current_gain = GetSplitGains<USE_MC, USE_L1, USE_MAX_OUTPUT>(
sum_other_gradient, sum_other_hessian, grad, hess + kEpsilon,
meta_->config->lambda_l1, l2, meta_->config->max_delta_step,
constraints, 0);
// gain with split is worse than without split
if (current_gain <= min_gain_shift) continue;
......@@ -214,7 +338,8 @@ class FeatureHistogram {
}
} else {
for (int i = 0; i < used_bin; ++i) {
if (Common::RoundInt(GET_HESS(data_, i) * cnt_factor) >= meta_->config->cat_smooth) {
if (Common::RoundInt(GET_HESS(data_, i) * cnt_factor) >=
meta_->config->cat_smooth) {
sorted_idx.push_back(i);
}
}
......@@ -226,19 +351,21 @@ class FeatureHistogram {
return (sum_grad) / (sum_hess + meta_->config->cat_smooth);
};
std::sort(sorted_idx.begin(), sorted_idx.end(),
[this, &ctr_fun](int i, int j) {
return ctr_fun(GET_GRAD(data_, i), GET_HESS(data_, i)) < ctr_fun(GET_GRAD(data_, j), GET_HESS(data_, j));
});
[this, &ctr_fun](int i, int j) {
return ctr_fun(GET_GRAD(data_, i), GET_HESS(data_, i)) <
ctr_fun(GET_GRAD(data_, j), GET_HESS(data_, j));
});
std::vector<int> find_direction(1, 1);
std::vector<int> start_position(1, 0);
find_direction.push_back(-1);
start_position.push_back(used_bin - 1);
const int max_num_cat = std::min(meta_->config->max_cat_threshold, (used_bin + 1) / 2);
const int max_num_cat =
std::min(meta_->config->max_cat_threshold, (used_bin + 1) / 2);
int max_threshold = std::max(std::min(max_num_cat, used_bin) - 1, 0);
if (IS_RAND) {
if (USE_RAND) {
if (max_threshold > 0) {
rand_threshold = rand_.NextInt(0, max_threshold);
rand_threshold = meta_->rand.NextInt(0, max_threshold);
}
}
......@@ -256,17 +383,21 @@ class FeatureHistogram {
start_pos += dir;
const auto grad = GET_GRAD(data_, t);
const auto hess = GET_HESS(data_, t);
data_size_t cnt = static_cast<data_size_t>(Common::RoundInt(hess * cnt_factor));
data_size_t cnt =
static_cast<data_size_t>(Common::RoundInt(hess * cnt_factor));
sum_left_gradient += grad;
sum_left_hessian += hess;
left_count += cnt;
cnt_cur_group += cnt;
if (left_count < meta_->config->min_data_in_leaf
|| sum_left_hessian < meta_->config->min_sum_hessian_in_leaf) continue;
if (left_count < meta_->config->min_data_in_leaf ||
sum_left_hessian < meta_->config->min_sum_hessian_in_leaf)
continue;
data_size_t right_count = num_data - left_count;
if (right_count < meta_->config->min_data_in_leaf || right_count < min_data_per_group) break;
if (right_count < meta_->config->min_data_in_leaf ||
right_count < min_data_per_group)
break;
double sum_right_hessian = sum_hessian - sum_left_hessian;
if (sum_right_hessian < meta_->config->min_sum_hessian_in_leaf) break;
......@@ -276,13 +407,15 @@ class FeatureHistogram {
cnt_cur_group = 0;
double sum_right_gradient = sum_gradient - sum_left_gradient;
if (IS_RAND) {
if (USE_RAND) {
if (i != rand_threshold) {
continue;
}
}
double current_gain = GetSplitGains(sum_left_gradient, sum_left_hessian, sum_right_gradient, sum_right_hessian,
meta_->config->lambda_l1, l2, meta_->config->max_delta_step, constraints, 0);
double current_gain = GetSplitGains<USE_MC, USE_L1, USE_MAX_OUTPUT>(
sum_left_gradient, sum_left_hessian, sum_right_gradient,
sum_right_hessian, meta_->config->lambda_l1, l2,
meta_->config->max_delta_step, constraints, 0);
if (current_gain <= min_gain_shift) continue;
is_splittable_ = true;
if (current_gain > best_gain) {
......@@ -298,24 +431,32 @@ class FeatureHistogram {
}
if (is_splittable_) {
output->left_output = CalculateSplittedLeafOutput(best_sum_left_gradient, best_sum_left_hessian,
meta_->config->lambda_l1, l2, meta_->config->max_delta_step, constraints);
output->left_output =
CalculateSplittedLeafOutput<USE_MC, USE_L1, USE_MAX_OUTPUT>(
best_sum_left_gradient, best_sum_left_hessian,
meta_->config->lambda_l1, l2, meta_->config->max_delta_step,
constraints);
output->left_count = best_left_count;
output->left_sum_gradient = best_sum_left_gradient;
output->left_sum_hessian = best_sum_left_hessian - kEpsilon;
output->right_output = CalculateSplittedLeafOutput(
sum_gradient - best_sum_left_gradient, sum_hessian - best_sum_left_hessian,
meta_->config->lambda_l1, l2, meta_->config->max_delta_step, constraints);
output->right_output =
CalculateSplittedLeafOutput<USE_MC, USE_L1, USE_MAX_OUTPUT>(
sum_gradient - best_sum_left_gradient,
sum_hessian - best_sum_left_hessian, meta_->config->lambda_l1, l2,
meta_->config->max_delta_step, constraints);
output->right_count = num_data - best_left_count;
output->right_sum_gradient = sum_gradient - best_sum_left_gradient;
output->right_sum_hessian = sum_hessian - best_sum_left_hessian - kEpsilon;
output->right_sum_hessian =
sum_hessian - best_sum_left_hessian - kEpsilon;
output->gain = best_gain - min_gain_shift;
if (use_onehot) {
output->num_cat_threshold = 1;
output->cat_threshold = std::vector<uint32_t>(1, static_cast<uint32_t>(best_threshold));
output->cat_threshold =
std::vector<uint32_t>(1, static_cast<uint32_t>(best_threshold));
} else {
output->num_cat_threshold = best_threshold + 1;
output->cat_threshold = std::vector<uint32_t>(output->num_cat_threshold);
output->cat_threshold =
std::vector<uint32_t>(output->num_cat_threshold);
if (best_dir == 1) {
for (int i = 0; i < output->num_cat_threshold; ++i) {
auto t = sorted_idx[i];
......@@ -333,18 +474,23 @@ class FeatureHistogram {
}
void GatherInfoForThreshold(double sum_gradient, double sum_hessian,
uint32_t threshold, data_size_t num_data, SplitInfo* output) {
uint32_t threshold, data_size_t num_data,
SplitInfo* output) {
if (meta_->bin_type == BinType::NumericalBin) {
GatherInfoForThresholdNumerical(sum_gradient, sum_hessian, threshold, num_data, output);
GatherInfoForThresholdNumerical(sum_gradient, sum_hessian, threshold,
num_data, output);
} else {
GatherInfoForThresholdCategorical(sum_gradient, sum_hessian, threshold, num_data, output);
GatherInfoForThresholdCategorical(sum_gradient, sum_hessian, threshold,
num_data, output);
}
}
void GatherInfoForThresholdNumerical(double sum_gradient, double sum_hessian,
uint32_t threshold, data_size_t num_data, SplitInfo* output) {
double gain_shift = GetLeafSplitGain(sum_gradient, sum_hessian,
meta_->config->lambda_l1, meta_->config->lambda_l2, meta_->config->max_delta_step);
uint32_t threshold, data_size_t num_data,
SplitInfo* output) {
double gain_shift = GetLeafGain<true, true>(
sum_gradient, sum_hessian, meta_->config->lambda_l1,
meta_->config->lambda_l2, meta_->config->max_delta_step);
double min_gain_shift = gain_shift + meta_->config->min_gain_to_split;
// do stuff here
......@@ -368,13 +514,19 @@ class FeatureHistogram {
const double cnt_factor = num_data / sum_hessian;
// from right to left, and we don't need data in bin0
for (; t >= t_end; --t) {
if (static_cast<uint32_t>(t + offset) < threshold) { break; }
if (static_cast<uint32_t>(t + offset) < threshold) {
break;
}
// need to skip default bin
if (skip_default_bin && (t + offset) == static_cast<int>(meta_->default_bin)) { continue; }
if (skip_default_bin &&
(t + offset) == static_cast<int>(meta_->default_bin)) {
continue;
}
const auto grad = GET_GRAD(data_, t);
const auto hess = GET_HESS(data_, t);
data_size_t cnt = static_cast<data_size_t>(Common::RoundInt(hess * cnt_factor));
data_size_t cnt =
static_cast<data_size_t>(Common::RoundInt(hess * cnt_factor));
sum_right_gradient += grad;
sum_right_hessian += hess;
right_count += cnt;
......@@ -382,42 +534,50 @@ class FeatureHistogram {
double sum_left_gradient = sum_gradient - sum_right_gradient;
double sum_left_hessian = sum_hessian - sum_right_hessian;
data_size_t left_count = num_data - right_count;
double current_gain = GetLeafSplitGain(sum_left_gradient, sum_left_hessian,
meta_->config->lambda_l1, meta_->config->lambda_l2, meta_->config->max_delta_step)
+ GetLeafSplitGain(sum_right_gradient, sum_right_hessian,
meta_->config->lambda_l1, meta_->config->lambda_l2, meta_->config->max_delta_step);
double current_gain =
GetLeafGain<true, true>(
sum_left_gradient, sum_left_hessian, meta_->config->lambda_l1,
meta_->config->lambda_l2, meta_->config->max_delta_step) +
GetLeafGain<true, true>(
sum_right_gradient, sum_right_hessian, meta_->config->lambda_l1,
meta_->config->lambda_l2, meta_->config->max_delta_step);
// gain with split is worse than without split
if (std::isnan(current_gain) || current_gain <= min_gain_shift) {
output->gain = kMinScore;
Log::Warning("'Forced Split' will be ignored since the gain getting worse. ");
Log::Warning(
"'Forced Split' will be ignored since the gain getting worse. ");
return;
}
// update split information
output->threshold = threshold;
output->left_output = CalculateSplittedLeafOutput(sum_left_gradient, sum_left_hessian,
meta_->config->lambda_l1, meta_->config->lambda_l2, meta_->config->max_delta_step);
output->left_output = CalculateSplittedLeafOutput<true, true>(
sum_left_gradient, sum_left_hessian, meta_->config->lambda_l1,
meta_->config->lambda_l2, meta_->config->max_delta_step);
output->left_count = left_count;
output->left_sum_gradient = sum_left_gradient;
output->left_sum_hessian = sum_left_hessian - kEpsilon;
output->right_output = CalculateSplittedLeafOutput(
sum_gradient - sum_left_gradient, sum_hessian - sum_left_hessian,
meta_->config->lambda_l1, meta_->config->lambda_l2, meta_->config->max_delta_step);
output->right_output = CalculateSplittedLeafOutput<true, true>(
sum_gradient - sum_left_gradient, sum_hessian - sum_left_hessian,
meta_->config->lambda_l1, meta_->config->lambda_l2,
meta_->config->max_delta_step);
output->right_count = num_data - left_count;
output->right_sum_gradient = sum_gradient - sum_left_gradient;
output->right_sum_hessian = sum_hessian - sum_left_hessian - kEpsilon;
output->gain = current_gain;
output->gain -= min_gain_shift;
output->gain = current_gain - min_gain_shift;
output->default_left = true;
}
void GatherInfoForThresholdCategorical(double sum_gradient, double sum_hessian,
uint32_t threshold, data_size_t num_data, SplitInfo* output) {
void GatherInfoForThresholdCategorical(double sum_gradient,
double sum_hessian, uint32_t threshold,
data_size_t num_data,
SplitInfo* output) {
// get SplitInfo for a given one-hot categorical split.
output->default_left = false;
double gain_shift = GetLeafSplitGain(sum_gradient, sum_hessian,
meta_->config->lambda_l1, meta_->config->lambda_l2, meta_->config->max_delta_step);
double gain_shift = GetLeafGain<true, true>(
sum_gradient, sum_hessian, meta_->config->lambda_l1,
meta_->config->lambda_l2, meta_->config->max_delta_step);
double min_gain_shift = gain_shift + meta_->config->min_gain_to_split;
bool is_full_categorical = meta_->missing_type == MissingType::None;
int used_bin = meta_->num_bin - 1 + is_full_categorical;
......@@ -429,7 +589,8 @@ class FeatureHistogram {
const double cnt_factor = num_data / sum_hessian;
const auto grad = GET_GRAD(data_, threshold);
const auto hess = GET_HESS(data_, threshold);
data_size_t cnt = static_cast<data_size_t>(Common::RoundInt(hess * cnt_factor));
data_size_t cnt =
static_cast<data_size_t>(Common::RoundInt(hess * cnt_factor));
double l2 = meta_->config->lambda_l2;
data_size_t left_count = cnt;
......@@ -439,23 +600,29 @@ class FeatureHistogram {
double sum_left_gradient = grad;
double sum_right_gradient = sum_gradient - sum_left_gradient;
// current split gain
double current_gain = GetLeafSplitGain(sum_right_gradient, sum_right_hessian,
meta_->config->lambda_l1, l2, meta_->config->max_delta_step)
+ GetLeafSplitGain(sum_left_gradient, sum_left_hessian,
meta_->config->lambda_l1, l2, meta_->config->max_delta_step);
double current_gain =
GetLeafGain<true, true>(sum_right_gradient, sum_right_hessian,
meta_->config->lambda_l1, l2,
meta_->config->max_delta_step) +
GetLeafGain<true, true>(sum_left_gradient, sum_left_hessian,
meta_->config->lambda_l1, l2,
meta_->config->max_delta_step);
if (std::isnan(current_gain) || current_gain <= min_gain_shift) {
output->gain = kMinScore;
Log::Warning("'Forced Split' will be ignored since the gain getting worse.");
Log::Warning(
"'Forced Split' will be ignored since the gain getting worse.");
return;
}
output->left_output = CalculateSplittedLeafOutput(sum_left_gradient, sum_left_hessian,
meta_->config->lambda_l1, l2, meta_->config->max_delta_step);
output->left_output = CalculateSplittedLeafOutput<true, true>(
sum_left_gradient, sum_left_hessian, meta_->config->lambda_l1, l2,
meta_->config->max_delta_step);
output->left_count = left_count;
output->left_sum_gradient = sum_left_gradient;
output->left_sum_hessian = sum_left_hessian - kEpsilon;
output->right_output = CalculateSplittedLeafOutput(sum_right_gradient, sum_right_hessian,
meta_->config->lambda_l1, l2, meta_->config->max_delta_step);
output->right_output = CalculateSplittedLeafOutput<true, true>(
sum_right_gradient, sum_right_hessian, meta_->config->lambda_l1, l2,
meta_->config->max_delta_step);
output->right_count = right_count;
output->right_sum_gradient = sum_gradient - sum_left_gradient;
output->right_sum_hessian = sum_right_hessian - kEpsilon;
......@@ -464,29 +631,29 @@ class FeatureHistogram {
output->cat_threshold = std::vector<uint32_t>(1, threshold);
}
/*!
* \brief Binary size of this histogram
*/
* \brief Binary size of this histogram
*/
int SizeOfHistgram() const {
return (meta_->num_bin - meta_->offset) * kHistEntrySize;
}
/*!
* \brief Restore histogram from memory
*/
* \brief Restore histogram from memory
*/
void FromMemory(char* memory_data) {
std::memcpy(data_, memory_data, (meta_->num_bin - meta_->offset) * kHistEntrySize);
std::memcpy(data_, memory_data,
(meta_->num_bin - meta_->offset) * kHistEntrySize);
}
/*!
* \brief True if this histogram can be splitted
*/
* \brief True if this histogram can be splitted
*/
bool is_splittable() { return is_splittable_; }
/*!
* \brief Set splittable to this histogram
*/
* \brief Set splittable to this histogram
*/
void set_is_splittable(bool val) { is_splittable_ = val; }
static double ThresholdL1(double s, double l1) {
......@@ -494,97 +661,153 @@ class FeatureHistogram {
return Common::Sign(s) * reg_s;
}
static double CalculateSplittedLeafOutput(double sum_gradients, double sum_hessians, double l1, double l2, double max_delta_step) {
double ret = -ThresholdL1(sum_gradients, l1) / (sum_hessians + l2);
if (max_delta_step <= 0.0f || std::fabs(ret) <= max_delta_step) {
template <bool USE_L1, bool USE_MAX_OUTPUT>
static double CalculateSplittedLeafOutput(double sum_gradients,
double sum_hessians, double l1,
double l2, double max_delta_step) {
if (USE_L1) {
double ret = -ThresholdL1(sum_gradients, l1) / (sum_hessians + l2);
if (USE_MAX_OUTPUT) {
if (max_delta_step > 0 && std::fabs(ret) > max_delta_step) {
return Common::Sign(ret) * max_delta_step;
}
}
return ret;
} else {
return Common::Sign(ret) * max_delta_step;
double ret = -sum_gradients / (sum_hessians + l2);
if (USE_MAX_OUTPUT) {
if (max_delta_step > 0 && std::fabs(ret) > max_delta_step) {
return Common::Sign(ret) * max_delta_step;
}
}
return ret;
}
}
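// Editor's note: both branches compute the second-order leaf weight
//   w* = -G~ / (H + l2),
// where G~ is the raw gradient sum (USE_L1 == false) or the soft-thresholded
// ThresholdL1(G, l1) (USE_L1 == true), optionally clipped so that
// |w*| <= max_delta_step. The template parameters only strip the dead
// branches when l1 == 0 or max_delta_step <= 0; the arithmetic is unchanged.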
private:
static double GetSplitGains(double sum_left_gradients, double sum_left_hessians,
double sum_right_gradients, double sum_right_hessians,
double l1, double l2, double max_delta_step,
const ConstraintEntry& constraints, int8_t monotone_constraint) {
double left_output = CalculateSplittedLeafOutput(sum_left_gradients, sum_left_hessians, l1, l2, max_delta_step, constraints);
double right_output = CalculateSplittedLeafOutput(sum_right_gradients, sum_right_hessians, l1, l2, max_delta_step, constraints);
if (((monotone_constraint > 0) && (left_output > right_output)) ||
((monotone_constraint < 0) && (left_output < right_output))) {
return 0;
}
return GetLeafSplitGainGivenOutput(sum_left_gradients, sum_left_hessians, l1, l2, left_output)
+ GetLeafSplitGainGivenOutput(sum_right_gradients, sum_right_hessians, l1, l2, right_output);
template <bool USE_MC, bool USE_L1, bool USE_MAX_OUTPUT>
static double CalculateSplittedLeafOutput(
double sum_gradients, double sum_hessians, double l1, double l2,
double max_delta_step, const ConstraintEntry& constraints) {
double ret = CalculateSplittedLeafOutput<USE_L1, USE_MAX_OUTPUT>(
sum_gradients, sum_hessians, l1, l2, max_delta_step);
if (USE_MC) {
if (ret < constraints.min) {
ret = constraints.min;
} else if (ret > constraints.max) {
ret = constraints.max;
}
}
return ret;
}
/*!
* \brief Calculate the output of a leaf based on regularized sum_gradients and sum_hessians
* \param sum_gradients
* \param sum_hessians
* \return leaf output
*/
static double CalculateSplittedLeafOutput(double sum_gradients, double sum_hessians,
double l1, double l2, double max_delta_step,
const ConstraintEntry& constraints) {
double ret = CalculateSplittedLeafOutput(sum_gradients, sum_hessians, l1, l2, max_delta_step);
if (ret < constraints.min) {
ret = constraints.min;
} else if (ret > constraints.max) {
ret = constraints.max;
private:
template <bool USE_MC, bool USE_L1, bool USE_MAX_OUTPUT>
static double GetSplitGains(double sum_left_gradients,
double sum_left_hessians,
double sum_right_gradients,
double sum_right_hessians, double l1, double l2,
double max_delta_step,
const ConstraintEntry& constraints,
int8_t monotone_constraint) {
if (!USE_MC) {
return GetLeafGain<USE_L1, USE_MAX_OUTPUT>(sum_left_gradients,
sum_left_hessians, l1, l2,
max_delta_step) +
GetLeafGain<USE_L1, USE_MAX_OUTPUT>(sum_right_gradients,
sum_right_hessians, l1, l2,
max_delta_step);
} else {
double left_output =
CalculateSplittedLeafOutput<USE_MC, USE_L1, USE_MAX_OUTPUT>(
sum_left_gradients, sum_left_hessians, l1, l2, max_delta_step,
constraints);
double right_output =
CalculateSplittedLeafOutput<USE_MC, USE_L1, USE_MAX_OUTPUT>(
sum_right_gradients, sum_right_hessians, l1, l2, max_delta_step,
constraints);
if (((monotone_constraint > 0) && (left_output > right_output)) ||
((monotone_constraint < 0) && (left_output < right_output))) {
return 0;
}
return GetLeafGainGivenOutput<USE_L1>(
sum_left_gradients, sum_left_hessians, l1, l2, left_output) +
GetLeafGainGivenOutput<USE_L1>(
sum_right_gradients, sum_right_hessians, l1, l2, right_output);
}
return ret;
}
/*!
* \brief Calculate the split gain based on regularized sum_gradients and sum_hessians
* \param sum_gradients
* \param sum_hessians
* \return split gain
*/
static double GetLeafSplitGain(double sum_gradients, double sum_hessians, double l1, double l2, double max_delta_step) {
double output = CalculateSplittedLeafOutput(sum_gradients, sum_hessians, l1, l2, max_delta_step);
return GetLeafSplitGainGivenOutput(sum_gradients, sum_hessians, l1, l2, output);
template <bool USE_L1, bool USE_MAX_OUTPUT>
static double GetLeafGain(double sum_gradients, double sum_hessians,
double l1, double l2, double max_delta_step) {
if (!USE_MAX_OUTPUT) {
if (USE_L1) {
const double sg_l1 = ThresholdL1(sum_gradients, l1);
return (sg_l1 * sg_l1) / (sum_hessians + l2);
} else {
return (sum_gradients * sum_gradients) / (sum_hessians + l2);
}
} else {
double output = CalculateSplittedLeafOutput<USE_L1, USE_MAX_OUTPUT>(
sum_gradients, sum_hessians, l1, l2, max_delta_step);
return GetLeafGainGivenOutput<USE_L1>(sum_gradients, sum_hessians, l1, l2,
output);
}
}
static double GetLeafSplitGainGivenOutput(double sum_gradients, double sum_hessians, double l1, double l2, double output) {
const double sg_l1 = ThresholdL1(sum_gradients, l1);
return -(2.0 * sg_l1 * output + (sum_hessians + l2) * output * output);
template <bool USE_L1>
static double GetLeafGainGivenOutput(double sum_gradients,
double sum_hessians, double l1,
double l2, double output) {
if (USE_L1) {
const double sg_l1 = ThresholdL1(sum_gradients, l1);
return -(2.0 * sg_l1 * output + (sum_hessians + l2) * output * output);
} else {
return -(2.0 * sum_gradients * output +
(sum_hessians + l2) * output * output);
}
}
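// Editor's note: GetLeafGainGivenOutput evaluates the negated objective
//   -(2 * G~ * w + (H + l2) * w^2)
// at an arbitrary leaf output w. At the unconstrained optimum
// w* = -G~ / (H + l2) this simplifies to G~^2 / (H + l2), which is exactly
// the closed form GetLeafGain uses on its fast path when USE_MAX_OUTPUT is
// false; the fast path just avoids computing w* first.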
template<bool IS_RAND>
void FindBestThresholdSequence(double sum_gradient, double sum_hessian, data_size_t num_data, const ConstraintEntry& constraints,
double min_gain_shift, SplitInfo* output, int dir, bool skip_default_bin, bool use_na_as_missing, int rand_threshold) {
template <bool USE_RAND, bool USE_MC, bool USE_L1, bool USE_MAX_OUTPUT,
bool REVERSE, bool SKIP_DEFAULT_BIN, bool NA_AS_MISSING>
void FindBestThresholdSequence(double sum_gradient, double sum_hessian,
data_size_t num_data,
const ConstraintEntry& constraints,
double min_gain_shift, SplitInfo* output,
int rand_threshold) {
const int8_t offset = meta_->offset;
double best_sum_left_gradient = NAN;
double best_sum_left_hessian = NAN;
double best_gain = kMinScore;
data_size_t best_left_count = 0;
uint32_t best_threshold = static_cast<uint32_t>(meta_->num_bin);
const double cnt_factor = num_data / sum_hessian;
if (dir == -1) {
if (REVERSE) {
double sum_right_gradient = 0.0f;
double sum_right_hessian = kEpsilon;
data_size_t right_count = 0;
int t = meta_->num_bin - 1 - offset - use_na_as_missing;
int t = meta_->num_bin - 1 - offset - NA_AS_MISSING;
const int t_end = 1 - offset;
// from right to left, and we don't need data in bin0
for (; t >= t_end; --t) {
// need to skip default bin
if (skip_default_bin && (t + offset) == static_cast<int>(meta_->default_bin)) { continue; }
if (SKIP_DEFAULT_BIN) {
if ((t + offset) == static_cast<int>(meta_->default_bin)) {
continue;
}
}
const auto grad = GET_GRAD(data_, t);
const auto hess = GET_HESS(data_, t);
data_size_t cnt = static_cast<data_size_t>(Common::RoundInt(hess * cnt_factor));
data_size_t cnt =
static_cast<data_size_t>(Common::RoundInt(hess * cnt_factor));
sum_right_gradient += grad;
sum_right_hessian += hess;
right_count += cnt;
// if data not enough, or sum hessian too small
if (right_count < meta_->config->min_data_in_leaf
|| sum_right_hessian < meta_->config->min_sum_hessian_in_leaf) continue;
if (right_count < meta_->config->min_data_in_leaf ||
sum_right_hessian < meta_->config->min_sum_hessian_in_leaf)
continue;
data_size_t left_count = num_data - right_count;
// if data not enough
if (left_count < meta_->config->min_data_in_leaf) break;
......@@ -594,15 +817,17 @@ class FeatureHistogram {
if (sum_left_hessian < meta_->config->min_sum_hessian_in_leaf) break;
double sum_left_gradient = sum_gradient - sum_right_gradient;
if (IS_RAND) {
if (USE_RAND) {
if (t - 1 + offset != rand_threshold) {
continue;
}
}
// current split gain
double current_gain = GetSplitGains(sum_left_gradient, sum_left_hessian, sum_right_gradient, sum_right_hessian,
meta_->config->lambda_l1, meta_->config->lambda_l2, meta_->config->max_delta_step,
constraints, meta_->monotone_type);
double current_gain = GetSplitGains<USE_MC, USE_L1, USE_MAX_OUTPUT>(
sum_left_gradient, sum_left_hessian, sum_right_gradient,
sum_right_hessian, meta_->config->lambda_l1,
meta_->config->lambda_l2, meta_->config->max_delta_step,
constraints, meta_->monotone_type);
// gain with split is worse than without split
if (current_gain <= min_gain_shift) continue;
......@@ -626,32 +851,40 @@ class FeatureHistogram {
int t = 0;
const int t_end = meta_->num_bin - 2 - offset;
if (use_na_as_missing && offset == 1) {
sum_left_gradient = sum_gradient;
sum_left_hessian = sum_hessian - kEpsilon;
left_count = num_data;
for (int i = 0; i < meta_->num_bin - offset; ++i) {
const auto grad = GET_GRAD(data_, i);
const auto hess = GET_HESS(data_, i);
data_size_t cnt = static_cast<data_size_t>(Common::RoundInt(hess * cnt_factor));
sum_left_gradient -= grad;
sum_left_hessian -= hess;
left_count -= cnt;
if (NA_AS_MISSING) {
if (offset == 1) {
sum_left_gradient = sum_gradient;
sum_left_hessian = sum_hessian - kEpsilon;
left_count = num_data;
for (int i = 0; i < meta_->num_bin - offset; ++i) {
const auto grad = GET_GRAD(data_, i);
const auto hess = GET_HESS(data_, i);
data_size_t cnt =
static_cast<data_size_t>(Common::RoundInt(hess * cnt_factor));
sum_left_gradient -= grad;
sum_left_hessian -= hess;
left_count -= cnt;
}
t = -1;
}
t = -1;
}
for (; t <= t_end; ++t) {
// need to skip default bin
if (skip_default_bin && (t + offset) == static_cast<int>(meta_->default_bin)) { continue; }
if (SKIP_DEFAULT_BIN) {
if ((t + offset) == static_cast<int>(meta_->default_bin)) {
continue;
}
}
if (t >= 0) {
sum_left_gradient += GET_GRAD(data_, t);
sum_left_hessian += GET_HESS(data_, t);
left_count += static_cast<data_size_t>(Common::RoundInt(GET_HESS(data_, t) * cnt_factor));
left_count += static_cast<data_size_t>(
Common::RoundInt(GET_HESS(data_, t) * cnt_factor));
}
// if data not enough, or sum hessian too small
if (left_count < meta_->config->min_data_in_leaf
|| sum_left_hessian < meta_->config->min_sum_hessian_in_leaf) continue;
if (left_count < meta_->config->min_data_in_leaf ||
sum_left_hessian < meta_->config->min_sum_hessian_in_leaf)
continue;
data_size_t right_count = num_data - left_count;
// if data not enough
if (right_count < meta_->config->min_data_in_leaf) break;
......@@ -661,15 +894,17 @@ class FeatureHistogram {
if (sum_right_hessian < meta_->config->min_sum_hessian_in_leaf) break;
double sum_right_gradient = sum_gradient - sum_left_gradient;
if (IS_RAND) {
if (USE_RAND) {
if (t + offset != rand_threshold) {
continue;
}
}
// current split gain
double current_gain = GetSplitGains(sum_left_gradient, sum_left_hessian, sum_right_gradient, sum_right_hessian,
meta_->config->lambda_l1, meta_->config->lambda_l2, meta_->config->max_delta_step,
constraints, meta_->monotone_type);
double current_gain = GetSplitGains<USE_MC, USE_L1, USE_MAX_OUTPUT>(
sum_left_gradient, sum_left_hessian, sum_right_gradient,
sum_right_hessian, meta_->config->lambda_l1,
meta_->config->lambda_l2, meta_->config->max_delta_step,
constraints, meta_->monotone_type);
// gain with split is worse than without split
if (current_gain <= min_gain_shift) continue;
......@@ -686,25 +921,29 @@ class FeatureHistogram {
}
}
if (is_splittable_ && best_gain > output->gain) {
if (is_splittable_ && best_gain > output->gain + min_gain_shift) {
// update split information
output->threshold = best_threshold;
output->left_output = CalculateSplittedLeafOutput(
best_sum_left_gradient, best_sum_left_hessian,
meta_->config->lambda_l1, meta_->config->lambda_l2,
meta_->config->max_delta_step, constraints);
output->left_output =
CalculateSplittedLeafOutput<USE_MC, USE_L1, USE_MAX_OUTPUT>(
best_sum_left_gradient, best_sum_left_hessian,
meta_->config->lambda_l1, meta_->config->lambda_l2,
meta_->config->max_delta_step, constraints);
output->left_count = best_left_count;
output->left_sum_gradient = best_sum_left_gradient;
output->left_sum_hessian = best_sum_left_hessian - kEpsilon;
output->right_output = CalculateSplittedLeafOutput(
sum_gradient - best_sum_left_gradient, sum_hessian - best_sum_left_hessian,
meta_->config->lambda_l1, meta_->config->lambda_l2, meta_->config->max_delta_step,
constraints);
output->right_output =
CalculateSplittedLeafOutput<USE_MC, USE_L1, USE_MAX_OUTPUT>(
sum_gradient - best_sum_left_gradient,
sum_hessian - best_sum_left_hessian, meta_->config->lambda_l1,
meta_->config->lambda_l2, meta_->config->max_delta_step,
constraints);
output->right_count = num_data - best_left_count;
output->right_sum_gradient = sum_gradient - best_sum_left_gradient;
output->right_sum_hessian = sum_hessian - best_sum_left_hessian - kEpsilon;
output->gain = best_gain;
output->default_left = dir == -1;
output->right_sum_hessian =
sum_hessian - best_sum_left_hessian - kEpsilon;
output->gain = best_gain - min_gain_shift;
output->default_left = REVERSE;
}
}
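// Editor's note: gain bookkeeping moved into this function. The caller used
// to subtract min_gain_shift after the fact; now the raw best_gain is
// compared against output->gain + min_gain_shift and stored as
// best_gain - min_gain_shift, so the two sequence passes (REVERSE and
// forward) that share one SplitInfo compare like with like.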
......@@ -712,34 +951,32 @@ class FeatureHistogram {
/*! \brief sum of gradient of each bin */
hist_t* data_;
bool is_splittable_ = true;
/*! \brief random number generator for extremely randomized trees */
Random rand_;
std::function<void(double, double, data_size_t, const ConstraintEntry&, SplitInfo*)>
find_best_threshold_fun_;
std::function<void(double, double, data_size_t, const ConstraintEntry&,
SplitInfo*)>
find_best_threshold_fun_;
};
class HistogramPool {
public:
/*!
* \brief Constructor
*/
* \brief Constructor
*/
HistogramPool() {
cache_size_ = 0;
total_size_ = 0;
}
/*!
* \brief Destructor
*/
~HistogramPool() {
}
* \brief Destructor
*/
~HistogramPool() {}
/*!
* \brief Reset pool size
* \param cache_size Max cache size
* \param total_size Total size will be used
*/
* \brief Reset pool size
* \param cache_size Max cache size
* \param total_size Total size will be used
*/
void Reset(int cache_size, int total_size) {
cache_size_ = cache_size;
// at least need 2 bucket to store smaller leaf and larger leaf
......@@ -758,8 +995,8 @@ class HistogramPool {
}
/*!
* \brief Reset mapper
*/
* \brief Reset mapper
*/
void ResetMap() {
if (!is_enough_) {
cur_time_ = 0;
......@@ -768,63 +1005,55 @@ class HistogramPool {
std::fill(last_used_time_.begin(), last_used_time_.end(), 0);
}
}
static void SetFeatureInfo(const Dataset* train_data, const Config* config, std::vector<FeatureMetainfo>* feature_meta) {
template <bool USE_DATA, bool USE_CONFIG>
static void SetFeatureInfo(const Dataset* train_data, const Config* config,
std::vector<FeatureMetainfo>* feature_meta) {
auto& ref_feature_meta = *feature_meta;
const int num_feature = train_data->num_features();
ref_feature_meta.resize(num_feature);
#pragma omp parallel for schedule(static)
#pragma omp parallel for schedule(static, 512) if (num_feature >= 1024)
for (int i = 0; i < num_feature; ++i) {
ref_feature_meta[i].num_bin = train_data->FeatureNumBin(i);
ref_feature_meta[i].default_bin = train_data->FeatureBinMapper(i)->GetDefaultBin();
ref_feature_meta[i].missing_type = train_data->FeatureBinMapper(i)->missing_type();
const int real_fidx = train_data->RealFeatureIndex(i);
if (!config->monotone_constraints.empty()) {
ref_feature_meta[i].monotone_type = config->monotone_constraints[real_fidx];
} else {
ref_feature_meta[i].monotone_type = 0;
}
if (!config->feature_contri.empty()) {
ref_feature_meta[i].penalty = config->feature_contri[real_fidx];
} else {
ref_feature_meta[i].penalty = 1.0;
if (USE_DATA) {
ref_feature_meta[i].num_bin = train_data->FeatureNumBin(i);
ref_feature_meta[i].default_bin =
train_data->FeatureBinMapper(i)->GetDefaultBin();
ref_feature_meta[i].missing_type =
train_data->FeatureBinMapper(i)->missing_type();
if (train_data->FeatureBinMapper(i)->GetMostFreqBin() == 0) {
ref_feature_meta[i].offset = 1;
} else {
ref_feature_meta[i].offset = 0;
}
ref_feature_meta[i].bin_type =
train_data->FeatureBinMapper(i)->bin_type();
}
if (train_data->FeatureBinMapper(i)->GetMostFreqBin() == 0) {
ref_feature_meta[i].offset = 1;
} else {
ref_feature_meta[i].offset = 0;
if (USE_CONFIG) {
const int real_fidx = train_data->RealFeatureIndex(i);
if (!config->monotone_constraints.empty()) {
ref_feature_meta[i].monotone_type =
config->monotone_constraints[real_fidx];
} else {
ref_feature_meta[i].monotone_type = 0;
}
if (!config->feature_contri.empty()) {
ref_feature_meta[i].penalty = config->feature_contri[real_fidx];
} else {
ref_feature_meta[i].penalty = 1.0;
}
ref_feature_meta[i].rand = Random(config->extra_seed + i);
}
ref_feature_meta[i].config = config;
ref_feature_meta[i].bin_type = train_data->FeatureBinMapper(i)->bin_type();
}
}
static void SetFeatureInfoConfig(const Dataset* train_data, const Config* config, std::vector<FeatureMetainfo>* feature_meta) {
auto& ref_feature_meta = *feature_meta;
const int num_feature = train_data->num_features();
ref_feature_meta.resize(num_feature);
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_feature; ++i) {
const int real_fidx = train_data->RealFeatureIndex(i);
if (!config->monotone_constraints.empty()) {
ref_feature_meta[i].monotone_type = config->monotone_constraints[real_fidx];
} else {
ref_feature_meta[i].monotone_type = 0;
}
if (!config->feature_contri.empty()) {
ref_feature_meta[i].penalty = config->feature_contri[real_fidx];
} else {
ref_feature_meta[i].penalty = 1.0;
}
ref_feature_meta[i].config = config;
}
}
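The two near-duplicate setup routines above are merged into a single `SetFeatureInfo<USE_DATA, USE_CONFIG>` template whose boolean parameters select which fields to fill, and the OpenMP loop now only spins up threads when the feature count justifies it. A minimal sketch of the same compile-time gating, with hypothetical field names:

    #include <cstdio>
    #include <vector>

    struct Meta { int num_bin; double penalty; };  // hypothetical fields

    template <bool USE_DATA, bool USE_CONFIG>
    void SetInfo(std::vector<Meta>* metas, int num_feature) {
      metas->resize(num_feature);
      // Only parallelize when the loop is large enough to amortize thread startup.
    #pragma omp parallel for schedule(static, 512) if (num_feature >= 1024)
      for (int i = 0; i < num_feature; ++i) {
        if (USE_DATA) {    // compiled out when USE_DATA is false
          (*metas)[i].num_bin = 255;
        }
        if (USE_CONFIG) {  // compiled out when USE_CONFIG is false
          (*metas)[i].penalty = 1.0;
        }
      }
    }

    int main() {
      std::vector<Meta> metas;
      SetInfo<true, true>(&metas, 8);   // full init, as in DynamicChangeSize
      SetInfo<false, true>(&metas, 8);  // config-only reset, as in ResetConfig
      std::printf("%d %.1f\n", metas[0].num_bin, metas[0].penalty);
      return 0;
    }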
void DynamicChangeSize(const Dataset* train_data, bool is_hist_colwise, const Config* config, int cache_size, int total_size) {
void DynamicChangeSize(const Dataset* train_data, bool is_hist_colwise,
const Config* config, int cache_size, int total_size) {
if (feature_metas_.empty()) {
SetFeatureInfo(train_data, config, &feature_metas_);
SetFeatureInfo<true, true>(train_data, config, &feature_metas_);
uint64_t bin_cnt_over_features = 0;
for (int i = 0; i < train_data->num_features(); ++i) {
bin_cnt_over_features += static_cast<uint64_t>(feature_metas_[i].num_bin);
bin_cnt_over_features +=
static_cast<uint64_t>(feature_metas_[i].num_bin);
}
Log::Info("Total Bins %d", bin_cnt_over_features);
}
......@@ -860,7 +1089,7 @@ class HistogramPool {
}
}
OMP_INIT_EX();
#pragma omp parallel for schedule(static)
#pragma omp parallel for schedule(static)
for (int i = old_cache_size; i < cache_size; ++i) {
OMP_LOOP_EX_BEGIN();
pool_[i].reset(new FeatureHistogram[train_data->num_features()]);
......@@ -874,15 +1103,16 @@ class HistogramPool {
}
void ResetConfig(const Dataset* train_data, const Config* config) {
SetFeatureInfoConfig(train_data, config, &feature_metas_);
SetFeatureInfo<false, true>(train_data, config, &feature_metas_);
}
/*!
* \brief Get data for the specific index
* \param idx which index want to get
* \param out output data will store into this
* \return True if this index is in the pool, False if this index is not in the pool
*/
* \brief Get data for the specific index
* \param idx Index of the data to get
* \param out Pointer where the data will be stored
* \return True if this index is in the pool, false otherwise
*/
bool Get(int idx, FeatureHistogram** out) {
if (is_enough_) {
*out = pool_[idx].get();
......@@ -909,10 +1139,10 @@ class HistogramPool {
}
/*!
* \brief Move data from one index to another index
* \param src_idx
* \param dst_idx
*/
* \brief Move data from one index to another
* \param src_idx Source index
* \param dst_idx Destination index
*/
void Move(int src_idx, int dst_idx) {
if (is_enough_) {
std::swap(pool_[src_idx], pool_[dst_idx]);
......@@ -934,7 +1164,9 @@ class HistogramPool {
private:
std::vector<std::unique_ptr<FeatureHistogram[]>> pool_;
std::vector<std::vector<hist_t, Common::AlignmentAllocator<hist_t, kAlignedSize>>> data_;
std::vector<
std::vector<hist_t, Common::AlignmentAllocator<hist_t, kAlignedSize>>>
data_;
std::vector<FeatureMetainfo> feature_metas_;
int cache_size_;
int total_size_;
......@@ -946,4 +1178,4 @@ class HistogramPool {
};
} // namespace LightGBM
#endif // LightGBM_TREELEARNER_FEATURE_HISTOGRAM_HPP_
#endif // LightGBM_TREELEARNER_FEATURE_HISTOGRAM_HPP_
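When the pool cannot hold one histogram set per leaf, `Get` and `Move` fall back to a least-recently-used mapping from leaf indices to cached buffers (the `last_used_time_` bookkeeping above). A minimal sketch of that policy, assuming integer timestamps and a simplified slot table:

    #include <algorithm>
    #include <vector>

    // Minimal LRU mapping of `total` logical slots onto `cache` buffers.
    struct LruPool {
      std::vector<int> mapper, inverse, last_used;
      int cur_time = 0;
      LruPool(int cache, int total)
          : mapper(total, -1), inverse(cache, -1), last_used(cache, 0) {}

      // Returns true on a cache hit; on a miss, evicts the LRU buffer.
      bool Get(int idx, int* slot) {
        if (mapper[idx] >= 0) {
          *slot = mapper[idx];
          last_used[*slot] = ++cur_time;
          return true;
        }
        int victim = static_cast<int>(
            std::min_element(last_used.begin(), last_used.end()) -
            last_used.begin());
        if (inverse[victim] >= 0) mapper[inverse[victim]] = -1;  // evict old owner
        mapper[idx] = victim;
        inverse[victim] = idx;
        last_used[victim] = ++cur_time;
        *slot = victim;
        return false;
      }
    };

    int main() {
      LruPool pool(2, 4);
      int s;
      pool.Get(0, &s);  // miss: fills slot 0
      pool.Get(1, &s);  // miss: fills slot 1
      pool.Get(0, &s);  // hit: leaf 0 is still cached
      pool.Get(3, &s);  // miss: evicts leaf 1, the least recently used
      return 0;
    }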
......@@ -38,16 +38,16 @@ void FeatureParallelTreeLearner<TREELEARNER_T>::BeforeTrain() {
for (int i = 0; i < this->train_data_->num_total_features(); ++i) {
int inner_feature_index = this->train_data_->InnerFeatureIndex(i);
if (inner_feature_index == -1) { continue; }
if (this->is_feature_used_[inner_feature_index]) {
if (this->col_sampler_.is_feature_used_bytree()[inner_feature_index]) {
int cur_min_machine = static_cast<int>(ArrayArgs<int>::ArgMin(num_bins_distributed));
feature_distribution[cur_min_machine].push_back(inner_feature_index);
num_bins_distributed[cur_min_machine] += this->train_data_->FeatureNumBin(inner_feature_index);
this->is_feature_used_[inner_feature_index] = false;
this->col_sampler_.SetIsFeatureUsedByTree(inner_feature_index, false);
}
}
// get local used features
for (auto fid : feature_distribution[rank_]) {
this->is_feature_used_[fid] = true;
this->col_sampler_.SetIsFeatureUsedByTree(fid, true);
}
}
......
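The feature-parallel hunk above balances work by always handing the next feature to the machine with the fewest bins assigned so far (`ArgMin` over `num_bins_distributed`), a standard greedy load-balancing heuristic. A minimal sketch with made-up bin counts:

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    int main() {
      const int num_machines = 3;
      std::vector<int> feature_bins = {255, 64, 32, 255, 128, 16};
      std::vector<long> load(num_machines, 0);
      std::vector<std::vector<int>> assigned(num_machines);
      for (int fid = 0; fid < static_cast<int>(feature_bins.size()); ++fid) {
        // Give the feature to the machine with the smallest bin count so far.
        int m = static_cast<int>(
            std::min_element(load.begin(), load.end()) - load.begin());
        assigned[m].push_back(fid);
        load[m] += feature_bins[fid];
      }
      for (int m = 0; m < num_machines; ++m)
        std::printf("machine %d: %ld bins\n", m, load[m]);
      return 0;
    }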
......@@ -735,9 +735,8 @@ void GPUTreeLearner::InitGPU(int platform_id, int device_id) {
SetupKernelArguments();
}
Tree* GPUTreeLearner::Train(const score_t* gradients, const score_t *hessians,
const Json& forced_split_json) {
return SerialTreeLearner::Train(gradients, hessians, forced_split_json);
Tree* GPUTreeLearner::Train(const score_t* gradients, const score_t *hessians) {
return SerialTreeLearner::Train(gradients, hessians);
}
void GPUTreeLearner::ResetTrainingDataInner(const Dataset* train_data, bool is_constant_hessian, bool reset_multi_val_bin) {
......@@ -957,7 +956,7 @@ void GPUTreeLearner::ConstructHistograms(const std::vector<int8_t>& is_feature_u
std::vector<int8_t> is_dense_feature_used(num_features_, 0);
#pragma omp parallel for schedule(static)
for (int feature_index = 0; feature_index < num_features_; ++feature_index) {
if (!is_feature_used_[feature_index]) continue;
if (!col_sampler_.is_feature_used_bytree()[feature_index]) continue;
if (!is_feature_used[feature_index]) continue;
if (train_data_->IsMultiGroup(train_data_->Feature2Group(feature_index))) {
is_sparse_feature_used[feature_index] = 1;
......@@ -1062,7 +1061,7 @@ void GPUTreeLearner::FindBestSplits() {
#if GPU_DEBUG >= 3
for (int feature_index = 0; feature_index < num_features_; ++feature_index) {
if (!is_feature_used_[feature_index]) continue;
if (!col_sampler_.is_feature_used_bytree()[feature_index]) continue;
if (parent_leaf_histogram_array_ != nullptr
&& !parent_leaf_histogram_array_[feature_index].is_splittable()) {
smaller_leaf_histogram_array_[feature_index].set_is_splittable(false);
......
......@@ -47,9 +47,8 @@ class GPUTreeLearner: public SerialTreeLearner {
~GPUTreeLearner();
void Init(const Dataset* train_data, bool is_constant_hessian) override;
void ResetTrainingDataInner(const Dataset* train_data, bool is_constant_hessian, bool reset_multi_val_bin) override;
void ResetIsConstantHessian(bool is_constant_hessian);
Tree* Train(const score_t* gradients, const score_t *hessians,
const Json& forced_split_json) override;
void ResetIsConstantHessian(bool is_constant_hessian) override;
Tree* Train(const score_t* gradients, const score_t *hessians) override;
void SetBaggingData(const Dataset* subset, const data_size_t* used_indices, data_size_t num_data) override {
SerialTreeLearner::SetBaggingData(subset, used_indices, num_data);
......
......@@ -19,8 +19,7 @@
namespace LightGBM {
SerialTreeLearner::SerialTreeLearner(const Config* config)
:config_(config) {
random_ = Random(config_->feature_fraction_seed);
: config_(config), col_sampler_(config) {
}
SerialTreeLearner::~SerialTreeLearner() {
......@@ -55,8 +54,7 @@ void SerialTreeLearner::Init(const Dataset* train_data, bool is_constant_hessian
// initialize data partition
data_partition_.reset(new DataPartition(num_data_, config_->num_leaves));
is_feature_used_.resize(num_features_);
valid_feature_indices_ = train_data_->ValidFeatureIndices();
col_sampler_.SetTrainingData(train_data_);
// initialize ordered gradients and hessians
ordered_gradients_.resize(num_data_);
ordered_hessians_.resize(num_data_);
......@@ -74,15 +72,15 @@ void SerialTreeLearner::GetShareStates(const Dataset* dataset,
bool is_constant_hessian,
bool is_first_time) {
if (is_first_time) {
auto used_feature = GetUsedFeatures(true);
share_state_.reset(dataset->GetShareStates(
ordered_gradients_.data(), ordered_hessians_.data(), used_feature,
is_constant_hessian, config_->force_col_wise, config_->force_row_wise));
ordered_gradients_.data(), ordered_hessians_.data(),
col_sampler_.is_feature_used_bytree(), is_constant_hessian,
config_->force_col_wise, config_->force_row_wise));
} else {
CHECK_NOTNULL(share_state_);
// cannot change is_hist_col_wise during training
share_state_.reset(dataset->GetShareStates(
ordered_gradients_.data(), ordered_hessians_.data(), is_feature_used_,
ordered_gradients_.data(), ordered_hessians_.data(), col_sampler_.is_feature_used_bytree(),
is_constant_hessian, share_state_->is_colwise,
!share_state_->is_colwise));
}
......@@ -102,15 +100,14 @@ void SerialTreeLearner::ResetTrainingDataInner(const Dataset* train_data,
// initialize data partition
data_partition_->ResetNumData(num_data_);
if (reset_multi_val_bin) {
col_sampler_.SetTrainingData(train_data_);
GetShareStates(train_data_, is_constant_hessian, false);
}
// initialize ordered gradients and hessians
ordered_gradients_.resize(num_data_);
ordered_hessians_.resize(num_data_);
if (cegb_ != nullptr) {
cegb_->Init();
}
......@@ -141,6 +138,7 @@ void SerialTreeLearner::ResetConfig(const Config* config) {
} else {
config_ = config;
}
col_sampler_.SetConfig(config_);
histogram_pool_.ResetConfig(train_data_, config_);
if (CostEfficientGradientBoosting::IsEnable(config_)) {
cegb_.reset(new CostEfficientGradientBoosting(this));
......@@ -148,7 +146,7 @@ void SerialTreeLearner::ResetConfig(const Config* config) {
}
}
Tree* SerialTreeLearner::Train(const score_t* gradients, const score_t *hessians, const Json& forced_split_json) {
Tree* SerialTreeLearner::Train(const score_t* gradients, const score_t *hessians) {
Common::FunctionTimer fun_timer("SerialTreeLearner::Train", global_timer);
gradients_ = gradients;
hessians_ = hessians;
......@@ -165,28 +163,21 @@ Tree* SerialTreeLearner::Train(const score_t* gradients, const score_t *hessians
BeforeTrain();
auto tree = std::unique_ptr<Tree>(new Tree(config_->num_leaves));
auto tree_prt = tree.get();
// root leaf
int left_leaf = 0;
int cur_depth = 1;
// at first, only the root leaf can be split
int right_leaf = -1;
int init_splits = 0;
bool aborted_last_force_split = false;
if (!forced_split_json.is_null()) {
init_splits = ForceSplits(tree.get(), forced_split_json, &left_leaf,
&right_leaf, &cur_depth, &aborted_last_force_split);
}
int init_splits = ForceSplits(tree_prt, &left_leaf, &right_leaf, &cur_depth);
for (int split = init_splits; split < config_->num_leaves - 1; ++split) {
// some initial work before finding the best split
if (!aborted_last_force_split && BeforeFindBestSplit(tree.get(), left_leaf, right_leaf)) {
if (BeforeFindBestSplit(tree_prt, left_leaf, right_leaf)) {
// find best threshold for every feature
FindBestSplits();
} else if (aborted_last_force_split) {
aborted_last_force_split = false;
}
}
// Get a leaf with max split gain
int best_leaf = static_cast<int>(ArrayArgs<SplitInfo>::ArgMax(best_split_per_leaf_));
// Get split information for best leaf
......@@ -197,7 +188,7 @@ Tree* SerialTreeLearner::Train(const score_t* gradients, const score_t *hessians
break;
}
// split tree with best leaf
Split(tree.get(), best_leaf, &left_leaf, &right_leaf);
Split(tree_prt, best_leaf, &left_leaf, &right_leaf);
cur_depth = std::max(cur_depth, tree->leaf_depth(left_leaf));
}
Log::Debug("Trained a tree with leaves = %d and max_depth = %d", tree->num_leaves(), cur_depth);
......@@ -220,8 +211,9 @@ Tree* SerialTreeLearner::FitByExistingTree(const Tree* old_tree, const score_t*
sum_grad += gradients[idx];
sum_hess += hessians[idx];
}
double output = FeatureHistogram::CalculateSplittedLeafOutput(sum_grad, sum_hess,
config_->lambda_l1, config_->lambda_l2, config_->max_delta_step);
double output = FeatureHistogram::CalculateSplittedLeafOutput<true, true>(
sum_grad, sum_hess, config_->lambda_l1, config_->lambda_l2,
config_->max_delta_step);
auto old_leaf_output = tree->LeafOutput(i);
auto new_leaf_output = output * tree->shrinkage();
tree->SetLeafOutput(i, config_->refit_decay_rate * old_leaf_output + (1.0 - config_->refit_decay_rate) * new_leaf_output);
......@@ -236,70 +228,13 @@ Tree* SerialTreeLearner::FitByExistingTree(const Tree* old_tree, const std::vect
return FitByExistingTree(old_tree, gradients, hessians);
}
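`FitByExistingTree` recomputes each leaf's output from the new gradients and blends it with the old output through `refit_decay_rate`. The templated `CalculateSplittedLeafOutput<USE_L1, USE_MAX_OUTPUT>` is, to a first approximation, a regularized Newton step: soft-threshold the gradient sum by `lambda_l1`, divide by the hessian sum plus `lambda_l2`, and optionally clip to `max_delta_step`. A sketch of that formula and the refit blend (simplified signature, not the exact library code):

    #include <algorithm>
    #include <cmath>
    #include <cstdio>

    // Regularized leaf output: -ThresholdL1(G, l1) / (H + l2), optionally clipped.
    template <bool USE_L1, bool USE_MAX_OUTPUT>
    double LeafOutput(double sum_grad, double sum_hess, double l1, double l2,
                      double max_delta_step) {
      double numer = sum_grad;
      if (USE_L1) {  // soft-threshold the gradient sum
        numer = std::copysign(std::max(0.0, std::fabs(sum_grad) - l1), sum_grad);
      }
      double out = -numer / (sum_hess + l2);
      if (USE_MAX_OUTPUT && max_delta_step > 0 &&
          std::fabs(out) > max_delta_step) {
        out = std::copysign(max_delta_step, out);
      }
      return out;
    }

    int main() {
      double old_out = 0.20, decay = 0.9;
      double new_out = LeafOutput<true, true>(-4.0, 10.0, 0.5, 1.0, 0.0);
      // The refit blends old and new outputs, as in SetLeafOutput above.
      std::printf("refit leaf output = %f\n",
                  decay * old_out + (1.0 - decay) * new_out);
      return 0;
    }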
std::vector<int8_t> SerialTreeLearner::GetUsedFeatures(bool is_tree_level) {
std::vector<int8_t> ret(num_features_, 1);
if (config_->feature_fraction >= 1.0f && is_tree_level) {
return ret;
}
if (config_->feature_fraction_bynode >= 1.0f && !is_tree_level) {
return ret;
}
std::memset(ret.data(), 0, sizeof(int8_t) * num_features_);
const int min_used_features = std::min(2, static_cast<int>(valid_feature_indices_.size()));
if (is_tree_level) {
int used_feature_cnt = static_cast<int>(std::round(valid_feature_indices_.size() * config_->feature_fraction));
used_feature_cnt = std::max(used_feature_cnt, min_used_features);
used_feature_indices_ = random_.Sample(static_cast<int>(valid_feature_indices_.size()), used_feature_cnt);
int omp_loop_size = static_cast<int>(used_feature_indices_.size());
#pragma omp parallel for schedule(static, 512) if (omp_loop_size >= 1024)
for (int i = 0; i < omp_loop_size; ++i) {
int used_feature = valid_feature_indices_[used_feature_indices_[i]];
int inner_feature_index = train_data_->InnerFeatureIndex(used_feature);
CHECK_GE(inner_feature_index, 0);
ret[inner_feature_index] = 1;
}
} else if (used_feature_indices_.size() <= 0) {
int used_feature_cnt = static_cast<int>(std::round(valid_feature_indices_.size() * config_->feature_fraction_bynode));
used_feature_cnt = std::max(used_feature_cnt, min_used_features);
auto sampled_indices = random_.Sample(static_cast<int>(valid_feature_indices_.size()), used_feature_cnt);
int omp_loop_size = static_cast<int>(sampled_indices.size());
#pragma omp parallel for schedule(static, 512) if (omp_loop_size >= 1024)
for (int i = 0; i < omp_loop_size; ++i) {
int used_feature = valid_feature_indices_[sampled_indices[i]];
int inner_feature_index = train_data_->InnerFeatureIndex(used_feature);
CHECK_GE(inner_feature_index, 0);
ret[inner_feature_index] = 1;
}
} else {
int used_feature_cnt = static_cast<int>(std::round(used_feature_indices_.size() * config_->feature_fraction_bynode));
used_feature_cnt = std::max(used_feature_cnt, min_used_features);
auto sampled_indices = random_.Sample(static_cast<int>(used_feature_indices_.size()), used_feature_cnt);
int omp_loop_size = static_cast<int>(sampled_indices.size());
#pragma omp parallel for schedule(static, 512) if (omp_loop_size >= 1024)
for (int i = 0; i < omp_loop_size; ++i) {
int used_feature = valid_feature_indices_[used_feature_indices_[sampled_indices[i]]];
int inner_feature_index = train_data_->InnerFeatureIndex(used_feature);
CHECK_GE(inner_feature_index, 0);
ret[inner_feature_index] = 1;
}
}
return ret;
}
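All of the removed sampling logic above now lives in the new `ColSampler` helper: one object owns the seeded RNG, the tree-level mask (`ResetByTree` / `is_feature_used_bytree`), and the per-node mask (`GetByNode`), which is what the call sites below switch to. A minimal sketch of that interface, assuming a simplified shuffle-based sampler rather than the library's `Random`:

    #include <algorithm>
    #include <cstdio>
    #include <numeric>
    #include <random>
    #include <vector>

    class ColSampler {
     public:
      ColSampler(int num_features, double fraction_bytree,
                 double fraction_bynode, int seed)
          : num_features_(num_features), bytree_(fraction_bytree),
            bynode_(fraction_bynode), rng_(seed) {}

      // Draw the tree-level mask once per tree.
      void ResetByTree() { used_bytree_ = Sample(bytree_); }
      const std::vector<int8_t>& is_feature_used_bytree() const {
        return used_bytree_;
      }
      // Draw a fresh node-level mask, restricted to the tree-level one.
      std::vector<int8_t> GetByNode() {
        std::vector<int8_t> ret = Sample(bynode_);
        for (int i = 0; i < num_features_; ++i) ret[i] &= used_bytree_[i];
        return ret;
      }

     private:
      std::vector<int8_t> Sample(double fraction) {
        std::vector<int8_t> ret(num_features_, 1);
        if (fraction >= 1.0) return ret;
        std::fill(ret.begin(), ret.end(), 0);
        std::vector<int> idx(num_features_);
        std::iota(idx.begin(), idx.end(), 0);
        std::shuffle(idx.begin(), idx.end(), rng_);
        int cnt = std::max(1, static_cast<int>(num_features_ * fraction));
        for (int i = 0; i < cnt; ++i) ret[idx[i]] = 1;
        return ret;
      }
      int num_features_;
      double bytree_, bynode_;
      std::mt19937 rng_;
      std::vector<int8_t> used_bytree_;
    };

    int main() {
      ColSampler sampler(100, 0.8, 0.5, 42);
      sampler.ResetByTree();                                // once per tree
      std::vector<int8_t> node_mask = sampler.GetByNode();  // per node
      int cnt = 0;
      for (int8_t v : node_mask) cnt += v;
      std::printf("features used at this node: %d\n", cnt);
      return 0;
    }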
void SerialTreeLearner::BeforeTrain() {
Common::FunctionTimer fun_timer("SerialTreeLearner::BeforeTrain", global_timer);
// reset histogram pool
histogram_pool_.ResetMap();
if (config_->feature_fraction < 1.0f) {
is_feature_used_ = GetUsedFeatures(true);
} else {
#pragma omp parallel for schedule(static, 512) if (num_features_ >= 1024)
for (int i = 0; i < num_features_; ++i) {
is_feature_used_[i] = 1;
}
}
train_data_->InitTrain(is_feature_used_, share_state_.get());
col_sampler_.ResetByTree();
train_data_->InitTrain(col_sampler_.is_feature_used_bytree(), share_state_.get());
// initialize data partition
data_partition_->Init();
......@@ -367,9 +302,9 @@ bool SerialTreeLearner::BeforeFindBestSplit(const Tree* tree, int left_leaf, int
void SerialTreeLearner::FindBestSplits() {
std::vector<int8_t> is_feature_used(num_features_, 0);
#pragma omp parallel for schedule(static, 1024) if (num_features_ >= 2048)
#pragma omp parallel for schedule(static, 256) if (num_features_ >= 512)
for (int feature_index = 0; feature_index < num_features_; ++feature_index) {
if (!is_feature_used_[feature_index]) continue;
if (!col_sampler_.is_feature_used_bytree()[feature_index]) continue;
if (parent_leaf_histogram_array_ != nullptr
&& !parent_leaf_histogram_array_[feature_index].is_splittable()) {
smaller_leaf_histogram_array_[feature_index].set_is_splittable(false);
......@@ -413,12 +348,8 @@ void SerialTreeLearner::FindBestSplitsFromHistograms(
"SerialTreeLearner::FindBestSplitsFromHistograms", global_timer);
std::vector<SplitInfo> smaller_best(share_state_->num_threads);
std::vector<SplitInfo> larger_best(share_state_->num_threads);
std::vector<int8_t> smaller_node_used_features(num_features_, 1);
std::vector<int8_t> larger_node_used_features(num_features_, 1);
if (config_->feature_fraction_bynode < 1.0f) {
smaller_node_used_features = GetUsedFeatures(false);
larger_node_used_features = GetUsedFeatures(false);
}
std::vector<int8_t> smaller_node_used_features = col_sampler_.GetByNode();
std::vector<int8_t> larger_node_used_features = col_sampler_.GetByNode();
OMP_INIT_EX();
// find splits
#pragma omp parallel for schedule(static)
......@@ -477,18 +408,21 @@ void SerialTreeLearner::FindBestSplitsFromHistograms(
}
}
int32_t SerialTreeLearner::ForceSplits(Tree* tree, const Json& forced_split_json, int* left_leaf,
int* right_leaf, int *cur_depth,
bool *aborted_last_force_split) {
int32_t SerialTreeLearner::ForceSplits(Tree* tree, int* left_leaf,
int* right_leaf, int *cur_depth) {
bool abort_last_forced_split = false;
if (forced_split_json_ == nullptr) {
return 0;
}
int32_t result_count = 0;
// start at root leaf
*left_leaf = 0;
std::queue<std::pair<Json, int>> q;
Json left = forced_split_json;
Json left = *forced_split_json_;
Json right;
bool left_smaller = true;
std::unordered_map<int, SplitInfo> forceSplitMap;
q.push(std::make_pair(forced_split_json, *left_leaf));
q.push(std::make_pair(left, *left_leaf));
while (!q.empty()) {
// before processing next node from queue, store info for current left/right leaf
// store "best split" for left and right, even if they might be overwritten by forced split
......@@ -546,88 +480,13 @@ int32_t SerialTreeLearner::ForceSplits(Tree* tree, const Json& forced_split_json
int current_leaf = pair.second;
// split info should exist because we search in BFS fashion - it should have been added by the parent
if (forceSplitMap.find(current_leaf) == forceSplitMap.end()) {
*aborted_last_force_split = true;
abort_last_forced_split = true;
break;
}
SplitInfo current_split_info = forceSplitMap[current_leaf];
const int inner_feature_index = train_data_->InnerFeatureIndex(
current_split_info.feature);
auto threshold_double = train_data_->RealThreshold(
inner_feature_index, current_split_info.threshold);
// split tree, will return right leaf
*left_leaf = current_leaf;
auto next_leaf_id = tree->NextLeafId();
if (train_data_->FeatureBinMapper(inner_feature_index)->bin_type() == BinType::NumericalBin) {
data_partition_->Split(current_leaf, train_data_, inner_feature_index,
&current_split_info.threshold, 1,
current_split_info.default_left, next_leaf_id);
current_split_info.left_count = data_partition_->leaf_count(*left_leaf);
current_split_info.right_count = data_partition_->leaf_count(next_leaf_id);
*right_leaf = tree->Split(current_leaf,
inner_feature_index,
current_split_info.feature,
current_split_info.threshold,
threshold_double,
static_cast<double>(current_split_info.left_output),
static_cast<double>(current_split_info.right_output),
static_cast<data_size_t>(current_split_info.left_count),
static_cast<data_size_t>(current_split_info.right_count),
static_cast<double>(current_split_info.left_sum_hessian),
static_cast<double>(current_split_info.right_sum_hessian),
static_cast<float>(current_split_info.gain),
train_data_->FeatureBinMapper(inner_feature_index)->missing_type(),
current_split_info.default_left);
} else {
std::vector<uint32_t> cat_bitset_inner = Common::ConstructBitset(
current_split_info.cat_threshold.data(), current_split_info.num_cat_threshold);
std::vector<int> threshold_int(current_split_info.num_cat_threshold);
for (int i = 0; i < current_split_info.num_cat_threshold; ++i) {
threshold_int[i] = static_cast<int>(train_data_->RealThreshold(
inner_feature_index, current_split_info.cat_threshold[i]));
}
std::vector<uint32_t> cat_bitset = Common::ConstructBitset(
threshold_int.data(), current_split_info.num_cat_threshold);
data_partition_->Split(current_leaf, train_data_, inner_feature_index,
cat_bitset_inner.data(), static_cast<int>(cat_bitset_inner.size()),
current_split_info.default_left, next_leaf_id);
current_split_info.left_count = data_partition_->leaf_count(*left_leaf);
current_split_info.right_count = data_partition_->leaf_count(next_leaf_id);
*right_leaf = tree->SplitCategorical(current_leaf,
inner_feature_index,
current_split_info.feature,
cat_bitset_inner.data(),
static_cast<int>(cat_bitset_inner.size()),
cat_bitset.data(),
static_cast<int>(cat_bitset.size()),
static_cast<double>(current_split_info.left_output),
static_cast<double>(current_split_info.right_output),
static_cast<data_size_t>(current_split_info.left_count),
static_cast<data_size_t>(current_split_info.right_count),
static_cast<double>(current_split_info.left_sum_hessian),
static_cast<double>(current_split_info.right_sum_hessian),
static_cast<float>(current_split_info.gain),
train_data_->FeatureBinMapper(inner_feature_index)->missing_type());
}
#ifdef DEBUG
CHECK(*right_leaf == next_leaf_id);
#endif
if (current_split_info.left_count < current_split_info.right_count) {
left_smaller = true;
smaller_leaf_splits_->Init(*left_leaf, data_partition_.get(),
current_split_info.left_sum_gradient,
current_split_info.left_sum_hessian);
larger_leaf_splits_->Init(*right_leaf, data_partition_.get(),
current_split_info.right_sum_gradient,
current_split_info.right_sum_hessian);
} else {
left_smaller = false;
smaller_leaf_splits_->Init(*right_leaf, data_partition_.get(),
current_split_info.right_sum_gradient, current_split_info.right_sum_hessian);
larger_leaf_splits_->Init(*left_leaf, data_partition_.get(),
current_split_info.left_sum_gradient, current_split_info.left_sum_hessian);
}
best_split_per_leaf_[current_leaf] = forceSplitMap[current_leaf];
Split(tree, current_leaf, left_leaf, right_leaf);
left_smaller = best_split_per_leaf_[current_leaf].left_count <
best_split_per_leaf_[current_leaf].right_count;
left = Json();
right = Json();
if ((pair.first).object_items().count("left") > 0) {
......@@ -645,6 +504,19 @@ int32_t SerialTreeLearner::ForceSplits(Tree* tree, const Json& forced_split_json
result_count++;
*(cur_depth) = std::max(*(cur_depth), tree->leaf_depth(*left_leaf));
}
if (abort_last_forced_split) {
int best_leaf =
static_cast<int>(ArrayArgs<SplitInfo>::ArgMax(best_split_per_leaf_));
const SplitInfo& best_leaf_SplitInfo = best_split_per_leaf_[best_leaf];
if (best_leaf_SplitInfo.gain <= 0.0) {
Log::Warning("No further splits with positive gain, best gain: %f",
best_leaf_SplitInfo.gain);
return config_->num_leaves;
}
Split(tree, best_leaf, left_leaf, right_leaf);
*(cur_depth) = std::max(*(cur_depth), tree->leaf_depth(*left_leaf));
result_count++;
}
return result_count;
}
......
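`ForceSplits` now reuses the regular `Split` path and walks the forced-split JSON breadth-first: a node's children are enqueued only after its own split is applied, paired with the leaf id that split produced. A minimal sketch of the traversal over the same nested left/right shape, using a plain struct in place of json11:

    #include <cstdio>
    #include <queue>
    #include <utility>

    // Simplified stand-in for the forced-split JSON: each node names a feature
    // and threshold and may carry "left"/"right" children.
    struct Node {
      int feature;
      double threshold;
      const Node* left = nullptr;
      const Node* right = nullptr;
    };

    int main() {
      Node leaf_l{2, 0.5}, root{0, 3.0, &leaf_l, nullptr};
      std::queue<std::pair<const Node*, int>> q;  // (node, leaf it splits)
      int next_leaf = 1, splits = 0;
      q.push({&root, 0});
      while (!q.empty()) {
        auto [node, leaf] = q.front();
        q.pop();
        int right_leaf = next_leaf++;  // Split() would return the new leaf id
        std::printf("split leaf %d on feature %d at %.2f -> new leaf %d\n",
                    leaf, node->feature, node->threshold, right_leaf);
        ++splits;
        // Children split the leaves that the parent's split just produced.
        if (node->left) q.push({node->left, leaf});
        if (node->right) q.push({node->right, right_leaf});
      }
      std::printf("forced %d splits\n", splits);
      return 0;
    }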
......@@ -9,6 +9,7 @@
#include <LightGBM/tree.h>
#include <LightGBM/tree_learner.h>
#include <LightGBM/utils/array_args.h>
#include <LightGBM/utils/json11.h>
#include <LightGBM/utils/random.h>
#include <string>
......@@ -18,6 +19,7 @@
#include <random>
#include <vector>
#include "col_sampler.hpp"
#include "data_partition.hpp"
#include "feature_histogram.hpp"
#include "leaf_splits.hpp"
......@@ -63,8 +65,15 @@ class SerialTreeLearner: public TreeLearner {
void ResetConfig(const Config* config) override;
Tree* Train(const score_t* gradients, const score_t *hessians,
const Json& forced_split_json) override;
inline void SetForcedSplit(const Json* forced_split_json) override {
if (forced_split_json != nullptr && !forced_split_json->is_null()) {
forced_split_json_ = forced_split_json;
} else {
forced_split_json_ = nullptr;
}
}
Tree* Train(const score_t* gradients, const score_t *hessians) override;
Tree* FitByExistingTree(const Tree* old_tree, const score_t* gradients, const score_t* hessians) const override;
......@@ -113,7 +122,6 @@ class SerialTreeLearner: public TreeLearner {
void GetShareStates(const Dataset* dataset, bool is_constant_hessian, bool is_first_time);
virtual std::vector<int8_t> GetUsedFeatures(bool is_tree_level);
/*!
* \brief Some initial work before training
*/
......@@ -142,12 +150,12 @@ class SerialTreeLearner: public TreeLearner {
SplitInner(tree, best_leaf, left_leaf, right_leaf, true);
}
void SplitInner(Tree* tree, int best_leaf, int* left_leaf, int* right_leaf, bool update_cnt);
void SplitInner(Tree* tree, int best_leaf, int* left_leaf, int* right_leaf,
bool update_cnt);
/* Apply the splits in forced_split_json, then return the number of forced splits. */
virtual int32_t ForceSplits(Tree* tree, const Json& forced_split_json, int* left_leaf,
int* right_leaf, int* cur_depth,
bool *aborted_last_force_split);
int32_t ForceSplits(Tree* tree, int* left_leaf, int* right_leaf,
int* cur_depth);
/*!
* \brief Get the number of data in a leaf
......@@ -168,12 +176,6 @@ class SerialTreeLearner: public TreeLearner {
const score_t* hessians_;
/*! \brief training data partition on leaves */
std::unique_ptr<DataPartition> data_partition_;
/*! \brief used for generate used features */
Random random_;
/*! \brief used for sub feature training, is_feature_used_[i] = false means don't used feature i */
std::vector<int8_t> is_feature_used_;
/*! \brief used feature indices in current tree */
std::vector<int> used_feature_indices_;
/*! \brief pointer to histograms array of parent of current leaves */
FeatureHistogram* parent_leaf_histogram_array_;
/*! \brief pointer to histograms array of smaller leaf */
......@@ -192,7 +194,6 @@ class SerialTreeLearner: public TreeLearner {
std::unique_ptr<LeafSplits> smaller_leaf_splits_;
/*! \brief stores best thresholds for all feature for larger leaf */
std::unique_ptr<LeafSplits> larger_leaf_splits_;
std::vector<int> valid_feature_indices_;
#ifdef USE_GPU
/*! \brief gradients of current iteration, ordered for cache optimized, aligned to 4K page */
......@@ -209,6 +210,8 @@ class SerialTreeLearner: public TreeLearner {
HistogramPool histogram_pool_;
/*! \brief config of tree learner*/
const Config* config_;
ColSampler col_sampler_;
const Json* forced_split_json_;
std::unique_ptr<TrainingShareStates> share_state_;
std::unique_ptr<CostEfficientGradientBoosting> cegb_;
};
......
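Taken together, the header changes above move the forced-split JSON out of the `Train` signature and into learner state set once via `SetForcedSplit`. A sketch of the new call sequence using mock types (this is not the real LightGBM API surface, just the shape of the change):

    #include <cstdio>

    // Minimal mock of the changed interface: the forced-split JSON becomes
    // learner state instead of a Train() argument. Json is a stand-in here.
    struct Json {
      bool null = true;
      bool is_null() const { return null; }
    };
    using score_t = float;

    class TreeLearner {
     public:
      void SetForcedSplit(const Json* j) {
        forced_split_json_ = (j != nullptr && !j->is_null()) ? j : nullptr;
      }
      void Train(const score_t* /*gradients*/, const score_t* /*hessians*/) {
        std::printf("training, forced splits %s\n",
                    forced_split_json_ ? "on" : "off");
      }
     private:
      const Json* forced_split_json_ = nullptr;
    };

    int main() {
      TreeLearner learner;
      Json forced_split_json;
      forced_split_json.null = false;              // pretend the file parsed
      learner.SetForcedSplit(&forced_split_json);  // once, at init time
      score_t g[1] = {0.0f}, h[1] = {1.0f};
      learner.Train(g, h);                         // per iteration, no Json argument
      return 0;
    }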
......@@ -66,7 +66,7 @@ void VotingParallelTreeLearner<TREELEARNER_T>::Init(const Dataset* train_data, b
auto num_total_bin = train_data->NumTotalBin();
smaller_leaf_histogram_data_.resize(num_total_bin);
larger_leaf_histogram_data_.resize(num_total_bin);
HistogramPool::SetFeatureInfo(train_data, this->config_, &feature_metas_);
HistogramPool::SetFeatureInfo<true, true>(train_data, this->config_, &feature_metas_);
uint64_t offset = 0;
for (int j = 0; j < train_data->num_features(); ++j) {
offset += static_cast<uint64_t>(train_data->SubFeatureBinOffset(j));
......@@ -91,7 +91,7 @@ void VotingParallelTreeLearner<TREELEARNER_T>::ResetConfig(const Config* config)
this->histogram_pool_.ResetConfig(this->train_data_, &local_config_);
global_data_count_in_leaf_.resize(this->config_->num_leaves);
HistogramPool::SetFeatureInfoConfig(this->train_data_, config, &feature_metas_);
HistogramPool::SetFeatureInfo<false, true>(this->train_data_, config, &feature_metas_);
}
template <typename TREELEARNER_T>
......@@ -247,7 +247,7 @@ void VotingParallelTreeLearner<TREELEARNER_T>::FindBestSplits() {
std::vector<int8_t> is_feature_used(this->num_features_, 0);
#pragma omp parallel for schedule(static)
for (int feature_index = 0; feature_index < this->num_features_; ++feature_index) {
if (!this->is_feature_used_[feature_index]) continue;
if (!this->col_sampler_.is_feature_used_bytree()[feature_index]) continue;
if (this->parent_leaf_histogram_array_ != nullptr
&& !this->parent_leaf_histogram_array_[feature_index].is_splittable()) {
this->smaller_leaf_histogram_array_[feature_index].set_is_splittable(false);
......@@ -351,12 +351,10 @@ template <typename TREELEARNER_T>
void VotingParallelTreeLearner<TREELEARNER_T>::FindBestSplitsFromHistograms(const std::vector<int8_t>&, bool) {
std::vector<SplitInfo> smaller_bests_per_thread(this->share_state_->num_threads);
std::vector<SplitInfo> larger_bests_per_thread(this->share_state_->num_threads);
std::vector<int8_t> smaller_node_used_features(this->num_features_, 1);
std::vector<int8_t> larger_node_used_features(this->num_features_, 1);
if (this->config_->feature_fraction_bynode < 1.0f) {
smaller_node_used_features = this->GetUsedFeatures(false);
larger_node_used_features = this->GetUsedFeatures(false);
}
std::vector<int8_t> smaller_node_used_features =
this->col_sampler_.GetByNode();
std::vector<int8_t> larger_node_used_features =
this->col_sampler_.GetByNode();
// find best split from local aggregated histograms
OMP_INIT_EX();
......@@ -429,7 +427,7 @@ void VotingParallelTreeLearner<TREELEARNER_T>::FindBestSplitsFromHistograms(cons
template <typename TREELEARNER_T>
void VotingParallelTreeLearner<TREELEARNER_T>::Split(Tree* tree, int best_Leaf, int* left_leaf, int* right_leaf) {
this->SplitInner(tree, best_Leaf, left_leaf, right_leaf, false);
TREELEARNER_T::SplitInner(tree, best_Leaf, left_leaf, right_leaf, false);
const SplitInfo& best_split_info = this->best_split_per_leaf_[best_Leaf];
// set the global number of data for leaves
global_data_count_in_leaf_[*left_leaf] = best_split_info.left_count;
......
......@@ -406,7 +406,8 @@ class TestEngine(unittest.TestCase):
'num_class': 10,
'num_leaves': 50,
'min_data': 1,
'verbose': -1
'verbose': -1,
'gpu_use_dp': True
}
lgb_train = lgb.Dataset(X_train, y_train, params=params)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, params=params)
......