Commit 899151fc authored by Nikita Titov, committed by Guolin Ke

[python] decode error description (#1362)

* decode error description

* added line break characters in log messages
parent 79d27770
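The common thread in the hunks below is twofold: trailing periods and exclamation marks are dropped from `Log::Fatal`/`Log::Warning` messages, and multi-line messages switch from a backslash line continuation inside the string literal to an explicit `\n` plus adjacent-literal concatenation. A minimal standalone sketch of why the latter matters, using a shortened message rather than the exact strings from the diff: with a backslash continuation, the next source line's leading indentation becomes part of the logged string.

```cpp
#include <cstdio>

int main() {
  // Backslash continuation: the second line's leading spaces are kept
  // inside the string, so the logged message prints with stray padding.
  const char* with_continuation = "Cannot find any ip and port. \
      Please check machine_list_filename or machines parameter.";

  // Adjacent string literals are concatenated at compile time; the
  // explicit \n yields a clean two-line message instead.
  const char* with_concatenation =
      "Cannot find any ip and port.\n"
      "Please check machine_list_filename or machines parameter";

  std::puts(with_continuation);
  std::puts(with_concatenation);
  return 0;
}
```

Adjacent literals are merged during translation, so the second form produces a clean two-line message with no embedded indentation.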
@@ -34,7 +34,7 @@ using namespace LightGBM;
 LGBM_SE EncodeChar(LGBM_SE dest, const char* src, LGBM_SE buf_len, LGBM_SE actual_len, size_t str_len) {
   if (str_len > INT32_MAX) {
-    Log::Fatal("Don't support large string in R-package.");
+    Log::Fatal("Don't support large string in R-package");
   }
   R_INT_PTR(actual_len)[0] = static_cast<int>(str_len);
   if (R_AS_INT(buf_len) < static_cast<int>(str_len)) { return dest; }
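The context around this hunk shows the R bridge's length-reporting convention: the actual string length is always written to `actual_len`, and if the caller's buffer is too small the function returns without copying, so the caller can reallocate and retry. A minimal sketch of that caller-side pattern, with hypothetical names (`encode_char` stands in for the bridge call; this is not the library's API):

```cpp
#include <string>
#include <vector>

// Hypothetical stand-in for the bridge call in the hunk: reports the
// required length via *actual_len and copies only if buf_len suffices.
void encode_char(char* dest, const char* src, int buf_len, int* actual_len) {
  int str_len = static_cast<int>(std::char_traits<char>::length(src));
  *actual_len = str_len;
  if (buf_len < str_len) return;  // caller must retry with a bigger buffer
  std::char_traits<char>::copy(dest, src, str_len);
}

int main() {
  const char* msg = "some error description";
  std::vector<char> buf(8);  // deliberately too small on the first try
  int needed = 0;
  encode_char(buf.data(), msg, static_cast<int>(buf.size()), &needed);
  if (needed > static_cast<int>(buf.size())) {
    buf.resize(needed);  // grow to the reported length and retry
    encode_char(buf.data(), msg, static_cast<int>(buf.size()), &needed);
  }
  return 0;
}
```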
@@ -20,4 +20,4 @@ int main(int argc, char** argv) {
     std::cerr << "Unknown Exceptions" << std::endl;
     exit(-1);
   }
-}
\ No newline at end of file
+}
@@ -130,8 +130,8 @@ void DCGCalculator::CheckLabel(const label_t* label, data_size_t num_data) {
   for (data_size_t i = 0; i < num_data; ++i) {
     label_t delta = std::fabs(label[i] - static_cast<int>(label[i]));
     if (delta > kEpsilon) {
-      Log::Fatal("label should be int type (met %f) for ranking task, \
-                 for the gain of label, please set the label_gain parameter.", label[i]);
+      Log::Fatal("label should be int type (met %f) for ranking task,\n"
+                 "for the gain of label, please set the label_gain parameter", label[i]);
     }
     if (static_cast<size_t>(label[i]) >= label_gain_.size() || label[i] < 0) {
       Log::Fatal("label (%d) excel the max range %d", label[i], label_gain_.size());
@@ -44,7 +44,7 @@ public:
       Log::Fatal("For MAP metric, there should be query information");
     }
     num_queries_ = metadata.num_queries();
-    Log::Info("total groups: %d , total data: %d", num_queries_, num_data_);
+    Log::Info("Total groups: %d, total data: %d", num_queries_, num_data_);
     // get query weights
     query_weights_ = metadata.query_weights();
     if (query_weights_ == nullptr) {
@@ -174,4 +174,3 @@ RecursiveHalvingMap RecursiveHalvingMap::Construct(int rank, int num_machines) {
 }
 } // namespace LightGBM
-
@@ -30,4 +30,3 @@ Linkers::~Linkers() {
 } // namespace LightGBM
 #endif // USE_MPI
-
@@ -110,8 +110,8 @@ void Linkers::ParseMachineList(const std::string& machines, const std::string& f
       client_ports_.push_back(atoi(str_after_split[1].c_str()));
     }
   }
   if (client_ips_.empty()) {
-    Log::Fatal("Cannot find any ip and port. \
-                Please check machine_list_filename or machines parameter.");
+    Log::Fatal("Cannot find any ip and port.\n"
+               "Please check machine_list_filename or machines parameter");
   }
   if (client_ips_.size() != static_cast<size_t>(num_machines_)) {
     Log::Warning("World size is larger than the machine_list size, change world size to %d", client_ips_.size());
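For context on the Fatal message above: each machine-list entry (from `machine_list_filename` or the `machines` parameter) is split on whitespace, with the first token taken as the IP and the second (`str_after_split[1]`) parsed as the port. A hypothetical list illustrating that one-`ip port`-pair-per-line format (addresses and ports are made up):

```
192.168.0.1 12400
192.168.0.2 12400
192.168.0.3 12400
```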
@@ -121,14 +121,14 @@ public:
     }
     if (setsockopt(sockfd_, SOL_SOCKET, SO_RCVBUF, reinterpret_cast<const char*>(&SocketConfig::kSocketBufferSize), sizeof(SocketConfig::kSocketBufferSize)) != 0) {
-      Log::Warning("Set SO_RCVBUF failed, please increase your net.core.rmem_max to 100k at least.");
+      Log::Warning("Set SO_RCVBUF failed, please increase your net.core.rmem_max to 100k at least");
     }
     if (setsockopt(sockfd_, SOL_SOCKET, SO_SNDBUF, reinterpret_cast<const char*>(&SocketConfig::kSocketBufferSize), sizeof(SocketConfig::kSocketBufferSize)) != 0) {
-      Log::Warning("Set SO_SNDBUF failed, please increase your net.core.wmem_max to 100k at least.");
+      Log::Warning("Set SO_SNDBUF failed, please increase your net.core.wmem_max to 100k at least");
     }
     if (setsockopt(sockfd_, IPPROTO_TCP, TCP_NODELAY, reinterpret_cast<const char*>(&SocketConfig::kNoDelay), sizeof(SocketConfig::kNoDelay)) != 0) {
-      Log::Warning("Set TCP_NODELAY failed.");
+      Log::Warning("Set TCP_NODELAY failed");
     }
   }
@@ -20,7 +20,7 @@ public:
     is_unbalance_ = config.is_unbalance;
     scale_pos_weight_ = static_cast<double>(config.scale_pos_weight);
     if(is_unbalance_ && std::fabs(scale_pos_weight_ - 1.0f) > 1e-6) {
-      Log::Fatal("Cannot set is_unbalance and scale_pos_weight at the same time.");
+      Log::Fatal("Cannot set is_unbalance and scale_pos_weight at the same time");
     }
     is_pos_ = is_pos;
     if (is_pos_ == nullptr) {
@@ -61,7 +61,7 @@ public:
       }
     }
     if (cnt_negative == 0 || cnt_positive == 0) {
-      Log::Warning("Only contain one class.");
+      Log::Warning("Contains only one class");
       // not need to boost.
       num_data_ = 0;
     }
@@ -30,7 +30,7 @@ public:
       }
     }
     if (num_class_ < 0) {
-      Log::Fatal("Objective should contains num_class field");
+      Log::Fatal("Objective should contain num_class field");
     }
   }
@@ -161,7 +161,7 @@ public:
       }
     }
     if (num_class_ < 0) {
-      Log::Fatal("Objective should contains num_class field");
+      Log::Fatal("Objective should contain num_class field");
     }
     if (sigmoid_ <= 0.0) {
       Log::Fatal("Sigmoid parameter %f should be greater than zero", sigmoid_);
@@ -366,7 +366,7 @@ public:
   explicit RegressionPoissonLoss(const ObjectiveConfig& config): RegressionL2loss(config) {
     max_delta_step_ = static_cast<double>(config.poisson_max_delta_step);
     if (sqrt_) {
-      Log::Warning("cannot use sqrt transform in %s Regression, will auto disable it.", GetName());
+      Log::Warning("Cannot use sqrt transform in %s Regression, will auto disable it", GetName());
       sqrt_ = false;
     }
   }
@@ -379,7 +379,7 @@ public:
   void Init(const Metadata& metadata, data_size_t num_data) override {
     if (sqrt_) {
-      Log::Warning("cannot use sqrt transform in %s Regression, will auto disable it.", GetName());
+      Log::Warning("Cannot use sqrt transform in %s Regression, will auto disable it", GetName());
       sqrt_ = false;
     }
     RegressionL2loss::Init(metadata, num_data);
@@ -388,10 +388,10 @@ public:
     double sumy;
     Common::ObtainMinMaxSum(label_, num_data_, &miny, (label_t*)nullptr, &sumy);
     if (miny < 0.0f) {
-      Log::Fatal("[%s]: at least one target label is negative.", GetName());
+      Log::Fatal("[%s]: at least one target label is negative", GetName());
     }
     if (sumy == 0.0f) {
-      Log::Fatal("[%s]: sum of labels is zero.", GetName());
+      Log::Fatal("[%s]: sum of labels is zero", GetName());
     }
   }
@@ -556,7 +556,7 @@ public:
     RegressionL2loss::Init(metadata, num_data);
     for (data_size_t i = 0; i < num_data_; ++i) {
       if (std::fabs(label_[i]) < 1) {
-        Log::Warning("Met 'abs(label) < 1', will convert them to '1' in Mape objective and metric.");
+        Log::Warning("Met 'abs(label) < 1', will convert them to '1' in MAPE objective and metric");
         break;
       }
     }
@@ -60,10 +60,10 @@ public:
     double sumw;
     Common::ObtainMinMaxSum(weights_, num_data_, &minw, (label_t*)nullptr, &sumw);
     if (minw < 0.0f) {
-      Log::Fatal("[%s]: at least one weight is negative.", GetName());
+      Log::Fatal("[%s]: at least one weight is negative", GetName());
     }
     if (sumw == 0.0f) {
-      Log::Fatal("[%s]: sum of weights is zero.", GetName());
+      Log::Fatal("[%s]: sum of weights is zero", GetName());
     }
   }
@@ -123,7 +123,7 @@ public:
     }
     double pavg = suml / sumw;
     double initscore = std::log(pavg / (1.0f - pavg));
-    Log::Info("[%s:%s]: pavg=%f -> initscore=%f", GetName(), __func__, pavg, initscore);
+    Log::Info("[%s:%s]: pavg = %f -> initscore = %f", GetName(), __func__, pavg, initscore);
     return initscore;
   }
@@ -163,7 +163,7 @@ public:
     Common::ObtainMinMaxSum(weights_, num_data_, &min_weight_, &max_weight_, (label_t*)nullptr);
     if (min_weight_ <= 0.0f) {
-      Log::Fatal("[%s]: at least one weight is non-positive.", GetName());
+      Log::Fatal("[%s]: at least one weight is non-positive", GetName());
     }
     // Issue an info statement about this ratio
@@ -248,7 +248,7 @@ public:
     }
     double havg = suml / sumw;
     double initscore = std::log(std::exp(havg) - 1.0f);
-    Log::Info("[%s:%s]: havg=%f -> initscore=%f", GetName(), __func__, havg, initscore);
+    Log::Info("[%s:%s]: havg = %f -> initscore = %f", GetName(), __func__, havg, initscore);
     return initscore;
   }
@@ -109,7 +109,7 @@ int GPUTreeLearner::GetNumWorkgroupsPerFeature(data_size_t leaf_num_data) {
 #if GPU_DEBUG >= 4
   printf("Computing histogram for %d examples and (%d * %d) feature groups\n", leaf_num_data, dword_features_, num_dense_feature4_);
   printf("We can have at most %d workgroups per feature4 for efficiency reasons.\n"
          "Best workgroup size per feature for full utilization is %d\n", (int)ceil(t), (1 << exp_workgroups_per_feature));
 #endif
   exp_workgroups_per_feature = std::min(exp_workgroups_per_feature, (int)ceil(log((double)t)/log(2.0)));
   if (exp_workgroups_per_feature < 0)
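The `std::min` cap above keeps `2^exp_workgroups_per_feature` at the smallest power of two that is at least `t`, where `t` is the workgroup count that would fully utilize the device. A minimal sketch of the arithmetic, with illustrative values (`t` and the pre-cap exponent are made up):

```cpp
#include <algorithm>
#include <cmath>
#include <cstdio>

int main() {
  // Illustrative values only: t stands for the number of workgroups
  // that would fully utilize the device for the current leaf.
  double t = 10.0;
  int exp_workgroups_per_feature = 10;  // hypothetical pre-cap value

  // Same cap as in the hunk above: 2^exp becomes the smallest power
  // of two that is >= t (ceil(log2(10)) = 4, and 2^4 = 16 >= 10).
  exp_workgroups_per_feature = std::min(
      exp_workgroups_per_feature,
      static_cast<int>(std::ceil(std::log(t) / std::log(2.0))));

  // Prints: exp_workgroups_per_feature = 4 -> 16 workgroups
  std::printf("exp_workgroups_per_feature = %d -> %d workgroups\n",
              exp_workgroups_per_feature, 1 << exp_workgroups_per_feature);
  return 0;
}
```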
@@ -129,7 +129,7 @@ void GPUTreeLearner::GPUHistogram(data_size_t leaf_num_data, bool use_all_featur
   int num_workgroups = (1 << exp_workgroups_per_feature) * num_dense_feature4_;
   if (num_workgroups > preallocd_max_num_wg_) {
     preallocd_max_num_wg_ = num_workgroups;
-    Log::Info("Increasing preallocd_max_num_wg_ to %d for launching more workgroups.", preallocd_max_num_wg_);
+    Log::Info("Increasing preallocd_max_num_wg_ to %d for launching more workgroups", preallocd_max_num_wg_);
     device_subhistograms_.reset(new boost::compute::vector<char>(
       preallocd_max_num_wg_ * dword_features_ * device_bin_size_ * hist_bin_entry_sz_, ctx_));
     // we need to refresh the kernel arguments after reallocating
@@ -141,7 +141,7 @@ void GPUTreeLearner::GPUHistogram(data_size_t leaf_num_data, bool use_all_featur
     }
   }
 #if GPU_DEBUG >= 4
-  printf("setting exp_workgroups_per_feature to %d, using %u work groups\n", exp_workgroups_per_feature, num_workgroups);
+  printf("Setting exp_workgroups_per_feature to %d, using %u work groups\n", exp_workgroups_per_feature, num_workgroups);
   printf("Constructing histogram with %d examples\n", leaf_num_data);
 #endif
@@ -388,7 +388,7 @@ void GPUTreeLearner::AllocateGPUMemory() {
       for (int s_idx = 0; s_idx < 8; ++s_idx) {
         bin_iters[s_idx] = train_data_->FeatureGroupIterator(dense_ind[s_idx]);
         if (dynamic_cast<Dense4bitsBinIterator*>(bin_iters[s_idx]) == 0) {
-          Log::Fatal("GPU tree learner assumes that all bins are Dense4bitsBin when num_bin <= 16, but feature %d is not.", dense_ind[s_idx]);
+          Log::Fatal("GPU tree learner assumes that all bins are Dense4bitsBin when num_bin <= 16, but feature %d is not", dense_ind[s_idx]);
         }
       }
       // this guarantees that the RawGet() function is inlined, rather than using virtual function dispatching
@@ -432,12 +432,12 @@ void GPUTreeLearner::AllocateGPUMemory() {
           }
         }
         else {
-          Log::Fatal("Bug in GPU tree builder: only DenseBin and Dense4bitsBin are supported!");
+          Log::Fatal("Bug in GPU tree builder: only DenseBin and Dense4bitsBin are supported");
         }
       }
     }
     else {
-      Log::Fatal("Bug in GPU tree builder: dword_features_ can only be 4 or 8!");
+      Log::Fatal("Bug in GPU tree builder: dword_features_ can only be 4 or 8");
     }
     queue_.enqueue_write_buffer(device_features_->get_buffer(),
       i * num_data_ * sizeof(Feature4), num_data_ * sizeof(Feature4), host4);
@@ -472,7 +472,7 @@ void GPUTreeLearner::AllocateGPUMemory() {
         }
       }
       else {
-        Log::Fatal("GPU tree learner assumes that all bins are Dense4bitsBin when num_bin <= 16, but feature %d is not.", dense_dword_ind[i]);
+        Log::Fatal("GPU tree learner assumes that all bins are Dense4bitsBin when num_bin <= 16, but feature %d is not", dense_dword_ind[i]);
       }
     }
     else if (dword_features_ == 4) {
@@ -494,11 +494,11 @@ void GPUTreeLearner::AllocateGPUMemory() {
         }
       }
       else {
-        Log::Fatal("BUG in GPU tree builder: only DenseBin and Dense4bitsBin are supported!");
+        Log::Fatal("BUG in GPU tree builder: only DenseBin and Dense4bitsBin are supported");
      }
    }
    else {
-     Log::Fatal("Bug in GPU tree builder: dword_features_ can only be 4 or 8!");
+     Log::Fatal("Bug in GPU tree builder: dword_features_ can only be 4 or 8");
    }
  }
  // fill the leftover features
@@ -538,7 +538,7 @@ void GPUTreeLearner::AllocateGPUMemory() {
   }
   // data transfer time
   std::chrono::duration<double, std::milli> end_time = std::chrono::steady_clock::now() - start_time;
-  Log::Info("%d dense feature groups (%.2f MB) transfered to GPU in %f secs. %d sparse feature groups.",
+  Log::Info("%d dense feature groups (%.2f MB) transfered to GPU in %f secs. %d sparse feature groups",
     dense_feature_group_map_.size(), ((dense_feature_group_map_.size() + (dword_features_ - 1)) / dword_features_) * num_data_ * sizeof(Feature4) / (1024.0 * 1024.0),
     end_time * 1e-3, sparse_feature_group_map_.size());
 #if GPU_DEBUG >= 1
@@ -861,7 +861,7 @@ bool GPUTreeLearner::BeforeFindBestSplit(const Tree* tree, int left_leaf, int ri
   // copy indices to the GPU:
 #if GPU_DEBUG >= 2
   Log::Info("Copying indices, gradients and hessians to GPU...");
-  printf("indices size %d being copied (left = %d, right = %d)\n", end - begin,num_data_in_left_child,num_data_in_right_child);
+  printf("Indices size %d being copied (left = %d, right = %d)\n", end - begin,num_data_in_left_child,num_data_in_right_child);
 #endif
   indices_future_ = boost::compute::copy_async(indices + begin, indices + end, device_data_indices_->begin(), queue_);
@@ -882,7 +882,7 @@ bool GPUTreeLearner::BeforeFindBestSplit(const Tree* tree, int left_leaf, int ri
     gradients_future_ = queue_.enqueue_write_buffer_async(device_gradients_, 0, (end - begin) * sizeof(score_t), ptr_pinned_gradients_);
 #if GPU_DEBUG >= 2
-    Log::Info("gradients/hessians/indiex copied to device with size %d", end - begin);
+    Log::Info("Gradients/hessians/indices copied to device with size %d", end - begin);
 #endif
   }
   return SerialTreeLearner::BeforeFindBestSplit(tree, left_leaf, right_leaf);
@@ -958,7 +958,7 @@ bool GPUTreeLearner::ConstructGPUHistogramsAsync(
     return false;
   }
 #if GPU_DEBUG >= 1
-  printf("feature masks:\n");
+  printf("Feature masks:\n");
   for (unsigned int i = 0; i < feature_masks_.size(); ++i) {
     printf("%d ", feature_masks_[i]);
   }
@@ -1084,10 +1084,10 @@ void GPUTreeLearner::FindBestSplits() {
       continue;
     }
     size_t bin_size = train_data_->FeatureNumBin(feature_index) + 1;
-    printf("feature %d smaller leaf:\n", feature_index);
+    printf("Feature %d smaller leaf:\n", feature_index);
     PrintHistograms(smaller_leaf_histogram_array_[feature_index].RawData() - 1, bin_size);
     if (larger_leaf_splits_ == nullptr || larger_leaf_splits_->LeafIndex() < 0) { continue; }
-    printf("feature %d larger leaf:\n", feature_index);
+    printf("Feature %d larger leaf:\n", feature_index);
     PrintHistograms(larger_leaf_histogram_array_[feature_index].RawData() - 1, bin_size);
   }
 #endif
@@ -1096,7 +1096,7 @@ void GPUTreeLearner::FindBestSplits() {
 void GPUTreeLearner::Split(Tree* tree, int best_Leaf, int* left_leaf, int* right_leaf) {
   const SplitInfo& best_split_info = best_split_per_leaf_[best_Leaf];
 #if GPU_DEBUG >= 2
-  printf("spliting leaf %d with feature %d thresh %d gain %f stat %f %f %f %f\n", best_Leaf, best_split_info.feature, best_split_info.threshold, best_split_info.gain, best_split_info.left_sum_gradient, best_split_info.right_sum_gradient, best_split_info.left_sum_hessian, best_split_info.right_sum_hessian);
+  printf("Spliting leaf %d with feature %d thresh %d gain %f stat %f %f %f %f\n", best_Leaf, best_split_info.feature, best_split_info.threshold, best_split_info.gain, best_split_info.left_sum_gradient, best_split_info.right_sum_gradient, best_split_info.left_sum_hessian, best_split_info.right_sum_hessian);
 #endif
   SerialTreeLearner::Split(tree, best_Leaf, left_leaf, right_leaf);
   if (Network::num_machines() == 1) {
@@ -1125,4 +1125,3 @@ void GPUTreeLearner::Split(Tree* tree, int best_Leaf, int* left_leaf, int* right
 } // namespace LightGBM
 #endif // USE_GPU
-
@@ -271,7 +271,8 @@ class GPUTreeLearner: public SerialTreeLearner {
 public:
   #pragma warning(disable : 4702)
   explicit GPUTreeLearner(const TreeConfig* tree_config) : SerialTreeLearner(tree_config) {
-    Log::Fatal("GPU Tree Learner was not enabled in this build. Recompile with CMake option -DUSE_GPU=1");
+    Log::Fatal("GPU Tree Learner was not enabled in this build.\n"
+               "Please recompile with CMake option -DUSE_GPU=1");
   }
 };
@@ -208,4 +208,3 @@ inline void SyncUpGlobalBestSplit(char* input_buffer_, char* output_buffer_, Spl
 } // namespace LightGBM
 #endif // LightGBM_TREELEARNER_PARALLEL_TREE_LEARNER_H_
-
@@ -215,7 +215,7 @@ Tree* SerialTreeLearner::Train(const score_t* gradients, const score_t *hessians
 #endif
     cur_depth = std::max(cur_depth, tree->leaf_depth(left_leaf));
   }
-  Log::Debug("Trained a tree with leaves=%d and max_depth=%d", tree->num_leaves(), cur_depth);
+  Log::Debug("Trained a tree with leaves = %d and max_depth = %d", tree->num_leaves(), cur_depth);
   return tree.release();
 }