Commit 899151fc authored by Nikita Titov, committed by Guolin Ke

[python] decode error description (#1362)

* decode error description

* added line-break char in log messages
parent 79d27770
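A note on the second bullet: C++ concatenates adjacent string literals at compile time, so a long message can be split across source lines and given an explicit "\n" where the printed text should wrap. The old code used a backslash line continuation inside the literal, which silently embedded the next source line's leading indentation in the message. A minimal standalone sketch of the new pattern (plain printf stands in for LightGBM's Log helpers; the label value is made up for illustration):

#include <cstdio>

int main() {
  // The two adjacent literals are merged into one format string at compile
  // time; the "\n" makes the warning print as two lines, with no stray
  // indentation leaking into the text.
  std::printf("label should be int type (met %f) for ranking task,\n"
              "for the gain of label, please set the label_gain parameter\n",
              1.5f);
  return 0;
}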
@@ -34,7 +34,7 @@ using namespace LightGBM;
 LGBM_SE EncodeChar(LGBM_SE dest, const char* src, LGBM_SE buf_len, LGBM_SE actual_len, size_t str_len) {
   if (str_len > INT32_MAX) {
-    Log::Fatal("Don't support large string in R-package.");
+    Log::Fatal("Don't support large string in R-package");
   }
   R_INT_PTR(actual_len)[0] = static_cast<int>(str_len);
   if (R_AS_INT(buf_len) < static_cast<int>(str_len)) { return dest; }
......
@@ -130,8 +130,8 @@ void DCGCalculator::CheckLabel(const label_t* label, data_size_t num_data) {
   for (data_size_t i = 0; i < num_data; ++i) {
     label_t delta = std::fabs(label[i] - static_cast<int>(label[i]));
     if (delta > kEpsilon) {
-      Log::Fatal("label should be int type (met %f) for ranking task, \
-      for the gain of label, please set the label_gain parameter.", label[i]);
+      Log::Fatal("label should be int type (met %f) for ranking task,\n"
+                 "for the gain of label, please set the label_gain parameter", label[i]);
     }
     if (static_cast<size_t>(label[i]) >= label_gain_.size() || label[i] < 0) {
       Log::Fatal("label (%d) excel the max range %d", label[i], label_gain_.size());
......
@@ -44,7 +44,7 @@ public:
       Log::Fatal("For MAP metric, there should be query information");
     }
     num_queries_ = metadata.num_queries();
-    Log::Info("total groups: %d , total data: %d", num_queries_, num_data_);
+    Log::Info("Total groups: %d, total data: %d", num_queries_, num_data_);
     // get query weights
     query_weights_ = metadata.query_weights();
    if (query_weights_ == nullptr) {
......
@@ -174,4 +174,3 @@ RecursiveHalvingMap RecursiveHalvingMap::Construct(int rank, int num_machines) {
 }
 } // namespace LightGBM
-
@@ -30,4 +30,3 @@ Linkers::~Linkers() {
 } // namespace LightGBM
 #endif // USE_MPI
-
@@ -110,8 +110,8 @@ void Linkers::ParseMachineList(const std::string& machines, const std::string& f
     client_ports_.push_back(atoi(str_after_split[1].c_str()));
   }
   if (client_ips_.empty()) {
-    Log::Fatal("Cannot find any ip and port. \
-    Please check machine_list_filename or machines parameter.");
+    Log::Fatal("Cannot find any ip and port.\n"
+               "Please check machine_list_filename or machines parameter");
   }
   if (client_ips_.size() != static_cast<size_t>(num_machines_)) {
     Log::Warning("World size is larger than the machine_list size, change world size to %d", client_ips_.size());
......
@@ -121,14 +121,14 @@ public:
   }
   if (setsockopt(sockfd_, SOL_SOCKET, SO_RCVBUF, reinterpret_cast<const char*>(&SocketConfig::kSocketBufferSize), sizeof(SocketConfig::kSocketBufferSize)) != 0) {
-    Log::Warning("Set SO_RCVBUF failed, please increase your net.core.rmem_max to 100k at least.");
+    Log::Warning("Set SO_RCVBUF failed, please increase your net.core.rmem_max to 100k at least");
   }
   if (setsockopt(sockfd_, SOL_SOCKET, SO_SNDBUF, reinterpret_cast<const char*>(&SocketConfig::kSocketBufferSize), sizeof(SocketConfig::kSocketBufferSize)) != 0) {
-    Log::Warning("Set SO_SNDBUF failed, please increase your net.core.wmem_max to 100k at least.");
+    Log::Warning("Set SO_SNDBUF failed, please increase your net.core.wmem_max to 100k at least");
   }
   if (setsockopt(sockfd_, IPPROTO_TCP, TCP_NODELAY, reinterpret_cast<const char*>(&SocketConfig::kNoDelay), sizeof(SocketConfig::kNoDelay)) != 0) {
-    Log::Warning("Set TCP_NODELAY failed.");
+    Log::Warning("Set TCP_NODELAY failed");
   }
 }
......
@@ -20,7 +20,7 @@ public:
     is_unbalance_ = config.is_unbalance;
     scale_pos_weight_ = static_cast<double>(config.scale_pos_weight);
     if(is_unbalance_ && std::fabs(scale_pos_weight_ - 1.0f) > 1e-6) {
-      Log::Fatal("Cannot set is_unbalance and scale_pos_weight at the same time.");
+      Log::Fatal("Cannot set is_unbalance and scale_pos_weight at the same time");
     }
     is_pos_ = is_pos;
     if (is_pos_ == nullptr) {
@@ -61,7 +61,7 @@ public:
       }
     }
     if (cnt_negative == 0 || cnt_positive == 0) {
-      Log::Warning("Only contain one class.");
+      Log::Warning("Contains only one class");
      // not need to boost.
      num_data_ = 0;
     }
......
@@ -30,7 +30,7 @@ public:
       }
     }
     if (num_class_ < 0) {
-      Log::Fatal("Objective should contains num_class field");
+      Log::Fatal("Objective should contain num_class field");
     }
   }
@@ -161,7 +161,7 @@ public:
       }
     }
     if (num_class_ < 0) {
-      Log::Fatal("Objective should contains num_class field");
+      Log::Fatal("Objective should contain num_class field");
     }
     if (sigmoid_ <= 0.0) {
       Log::Fatal("Sigmoid parameter %f should be greater than zero", sigmoid_);
......
@@ -366,7 +366,7 @@ public:
   explicit RegressionPoissonLoss(const ObjectiveConfig& config): RegressionL2loss(config) {
     max_delta_step_ = static_cast<double>(config.poisson_max_delta_step);
     if (sqrt_) {
-      Log::Warning("cannot use sqrt transform in %s Regression, will auto disable it.", GetName());
+      Log::Warning("Cannot use sqrt transform in %s Regression, will auto disable it", GetName());
       sqrt_ = false;
     }
   }
@@ -379,7 +379,7 @@ public:
   void Init(const Metadata& metadata, data_size_t num_data) override {
     if (sqrt_) {
-      Log::Warning("cannot use sqrt transform in %s Regression, will auto disable it.", GetName());
+      Log::Warning("Cannot use sqrt transform in %s Regression, will auto disable it", GetName());
       sqrt_ = false;
     }
     RegressionL2loss::Init(metadata, num_data);
@@ -388,10 +388,10 @@ public:
     double sumy;
     Common::ObtainMinMaxSum(label_, num_data_, &miny, (label_t*)nullptr, &sumy);
     if (miny < 0.0f) {
-      Log::Fatal("[%s]: at least one target label is negative.", GetName());
+      Log::Fatal("[%s]: at least one target label is negative", GetName());
     }
     if (sumy == 0.0f) {
-      Log::Fatal("[%s]: sum of labels is zero.", GetName());
+      Log::Fatal("[%s]: sum of labels is zero", GetName());
     }
   }
@@ -556,7 +556,7 @@ public:
     RegressionL2loss::Init(metadata, num_data);
     for (data_size_t i = 0; i < num_data_; ++i) {
       if (std::fabs(label_[i]) < 1) {
-        Log::Warning("Met 'abs(label) < 1', will convert them to '1' in Mape objective and metric.");
+        Log::Warning("Met 'abs(label) < 1', will convert them to '1' in MAPE objective and metric");
         break;
       }
     }
......
@@ -60,10 +60,10 @@ public:
     double sumw;
     Common::ObtainMinMaxSum(weights_, num_data_, &minw, (label_t*)nullptr, &sumw);
     if (minw < 0.0f) {
-      Log::Fatal("[%s]: at least one weight is negative.", GetName());
+      Log::Fatal("[%s]: at least one weight is negative", GetName());
     }
     if (sumw == 0.0f) {
-      Log::Fatal("[%s]: sum of weights is zero.", GetName());
+      Log::Fatal("[%s]: sum of weights is zero", GetName());
     }
   }
@@ -123,7 +123,7 @@ public:
     }
     double pavg = suml / sumw;
     double initscore = std::log(pavg / (1.0f - pavg));
-    Log::Info("[%s:%s]: pavg=%f -> initscore=%f", GetName(), __func__, pavg, initscore);
+    Log::Info("[%s:%s]: pavg = %f -> initscore = %f", GetName(), __func__, pavg, initscore);
     return initscore;
   }
@@ -163,7 +163,7 @@ public:
     Common::ObtainMinMaxSum(weights_, num_data_, &min_weight_, &max_weight_, (label_t*)nullptr);
     if (min_weight_ <= 0.0f) {
-      Log::Fatal("[%s]: at least one weight is non-positive.", GetName());
+      Log::Fatal("[%s]: at least one weight is non-positive", GetName());
     }
     // Issue an info statement about this ratio
@@ -248,7 +248,7 @@ public:
     }
     double havg = suml / sumw;
     double initscore = std::log(std::exp(havg) - 1.0f);
-    Log::Info("[%s:%s]: havg=%f -> initscore=%f", GetName(), __func__, havg, initscore);
+    Log::Info("[%s:%s]: havg = %f -> initscore = %f", GetName(), __func__, havg, initscore);
     return initscore;
   }
......
@@ -129,7 +129,7 @@ void GPUTreeLearner::GPUHistogram(data_size_t leaf_num_data, bool use_all_featur
   int num_workgroups = (1 << exp_workgroups_per_feature) * num_dense_feature4_;
   if (num_workgroups > preallocd_max_num_wg_) {
     preallocd_max_num_wg_ = num_workgroups;
-    Log::Info("Increasing preallocd_max_num_wg_ to %d for launching more workgroups.", preallocd_max_num_wg_);
+    Log::Info("Increasing preallocd_max_num_wg_ to %d for launching more workgroups", preallocd_max_num_wg_);
     device_subhistograms_.reset(new boost::compute::vector<char>(
       preallocd_max_num_wg_ * dword_features_ * device_bin_size_ * hist_bin_entry_sz_, ctx_));
     // we need to refresh the kernel arguments after reallocating
@@ -141,7 +141,7 @@ void GPUTreeLearner::GPUHistogram(data_size_t leaf_num_data, bool use_all_featur
     }
   }
 #if GPU_DEBUG >= 4
-  printf("setting exp_workgroups_per_feature to %d, using %u work groups\n", exp_workgroups_per_feature, num_workgroups);
+  printf("Setting exp_workgroups_per_feature to %d, using %u work groups\n", exp_workgroups_per_feature, num_workgroups);
   printf("Constructing histogram with %d examples\n", leaf_num_data);
 #endif
@@ -388,7 +388,7 @@ void GPUTreeLearner::AllocateGPUMemory() {
       for (int s_idx = 0; s_idx < 8; ++s_idx) {
         bin_iters[s_idx] = train_data_->FeatureGroupIterator(dense_ind[s_idx]);
         if (dynamic_cast<Dense4bitsBinIterator*>(bin_iters[s_idx]) == 0) {
-          Log::Fatal("GPU tree learner assumes that all bins are Dense4bitsBin when num_bin <= 16, but feature %d is not.", dense_ind[s_idx]);
+          Log::Fatal("GPU tree learner assumes that all bins are Dense4bitsBin when num_bin <= 16, but feature %d is not", dense_ind[s_idx]);
         }
       }
       // this guarantees that the RawGet() function is inlined, rather than using virtual function dispatching
@@ -432,12 +432,12 @@ void GPUTreeLearner::AllocateGPUMemory() {
           }
         }
         else {
-          Log::Fatal("Bug in GPU tree builder: only DenseBin and Dense4bitsBin are supported!");
+          Log::Fatal("Bug in GPU tree builder: only DenseBin and Dense4bitsBin are supported");
         }
       }
     }
     else {
-      Log::Fatal("Bug in GPU tree builder: dword_features_ can only be 4 or 8!");
+      Log::Fatal("Bug in GPU tree builder: dword_features_ can only be 4 or 8");
     }
     queue_.enqueue_write_buffer(device_features_->get_buffer(),
       i * num_data_ * sizeof(Feature4), num_data_ * sizeof(Feature4), host4);
@@ -472,7 +472,7 @@ void GPUTreeLearner::AllocateGPUMemory() {
         }
       }
       else {
-        Log::Fatal("GPU tree learner assumes that all bins are Dense4bitsBin when num_bin <= 16, but feature %d is not.", dense_dword_ind[i]);
+        Log::Fatal("GPU tree learner assumes that all bins are Dense4bitsBin when num_bin <= 16, but feature %d is not", dense_dword_ind[i]);
       }
     }
     else if (dword_features_ == 4) {
@@ -494,11 +494,11 @@ void GPUTreeLearner::AllocateGPUMemory() {
         }
       }
       else {
-        Log::Fatal("BUG in GPU tree builder: only DenseBin and Dense4bitsBin are supported!");
+        Log::Fatal("BUG in GPU tree builder: only DenseBin and Dense4bitsBin are supported");
       }
     }
     else {
-      Log::Fatal("Bug in GPU tree builder: dword_features_ can only be 4 or 8!");
+      Log::Fatal("Bug in GPU tree builder: dword_features_ can only be 4 or 8");
     }
   }
   // fill the leftover features
@@ -538,7 +538,7 @@ void GPUTreeLearner::AllocateGPUMemory() {
   }
   // data transfer time
   std::chrono::duration<double, std::milli> end_time = std::chrono::steady_clock::now() - start_time;
-  Log::Info("%d dense feature groups (%.2f MB) transfered to GPU in %f secs. %d sparse feature groups.",
+  Log::Info("%d dense feature groups (%.2f MB) transfered to GPU in %f secs. %d sparse feature groups",
             dense_feature_group_map_.size(), ((dense_feature_group_map_.size() + (dword_features_ - 1)) / dword_features_) * num_data_ * sizeof(Feature4) / (1024.0 * 1024.0),
             end_time * 1e-3, sparse_feature_group_map_.size());
 #if GPU_DEBUG >= 1
@@ -861,7 +861,7 @@ bool GPUTreeLearner::BeforeFindBestSplit(const Tree* tree, int left_leaf, int ri
   // copy indices to the GPU:
 #if GPU_DEBUG >= 2
   Log::Info("Copying indices, gradients and hessians to GPU...");
-  printf("indices size %d being copied (left = %d, right = %d)\n", end - begin,num_data_in_left_child,num_data_in_right_child);
+  printf("Indices size %d being copied (left = %d, right = %d)\n", end - begin,num_data_in_left_child,num_data_in_right_child);
 #endif
   indices_future_ = boost::compute::copy_async(indices + begin, indices + end, device_data_indices_->begin(), queue_);
@@ -882,7 +882,7 @@ bool GPUTreeLearner::BeforeFindBestSplit(const Tree* tree, int left_leaf, int ri
     gradients_future_ = queue_.enqueue_write_buffer_async(device_gradients_, 0, (end - begin) * sizeof(score_t), ptr_pinned_gradients_);
 #if GPU_DEBUG >= 2
-    Log::Info("gradients/hessians/indiex copied to device with size %d", end - begin);
+    Log::Info("Gradients/hessians/indices copied to device with size %d", end - begin);
 #endif
   }
   return SerialTreeLearner::BeforeFindBestSplit(tree, left_leaf, right_leaf);
@@ -958,7 +958,7 @@ bool GPUTreeLearner::ConstructGPUHistogramsAsync(
     return false;
   }
 #if GPU_DEBUG >= 1
-  printf("feature masks:\n");
+  printf("Feature masks:\n");
   for (unsigned int i = 0; i < feature_masks_.size(); ++i) {
     printf("%d ", feature_masks_[i]);
   }
@@ -1084,10 +1084,10 @@ void GPUTreeLearner::FindBestSplits() {
       continue;
     }
     size_t bin_size = train_data_->FeatureNumBin(feature_index) + 1;
-    printf("feature %d smaller leaf:\n", feature_index);
+    printf("Feature %d smaller leaf:\n", feature_index);
     PrintHistograms(smaller_leaf_histogram_array_[feature_index].RawData() - 1, bin_size);
     if (larger_leaf_splits_ == nullptr || larger_leaf_splits_->LeafIndex() < 0) { continue; }
-    printf("feature %d larger leaf:\n", feature_index);
+    printf("Feature %d larger leaf:\n", feature_index);
     PrintHistograms(larger_leaf_histogram_array_[feature_index].RawData() - 1, bin_size);
   }
 #endif
@@ -1096,7 +1096,7 @@ void GPUTreeLearner::FindBestSplits() {
 void GPUTreeLearner::Split(Tree* tree, int best_Leaf, int* left_leaf, int* right_leaf) {
   const SplitInfo& best_split_info = best_split_per_leaf_[best_Leaf];
 #if GPU_DEBUG >= 2
-  printf("spliting leaf %d with feature %d thresh %d gain %f stat %f %f %f %f\n", best_Leaf, best_split_info.feature, best_split_info.threshold, best_split_info.gain, best_split_info.left_sum_gradient, best_split_info.right_sum_gradient, best_split_info.left_sum_hessian, best_split_info.right_sum_hessian);
+  printf("Spliting leaf %d with feature %d thresh %d gain %f stat %f %f %f %f\n", best_Leaf, best_split_info.feature, best_split_info.threshold, best_split_info.gain, best_split_info.left_sum_gradient, best_split_info.right_sum_gradient, best_split_info.left_sum_hessian, best_split_info.right_sum_hessian);
 #endif
   SerialTreeLearner::Split(tree, best_Leaf, left_leaf, right_leaf);
   if (Network::num_machines() == 1) {
@@ -1125,4 +1125,3 @@ void GPUTreeLearner::Split(Tree* tree, int best_Leaf, int* left_leaf, int* right
 } // namespace LightGBM
 #endif // USE_GPU
-
@@ -271,7 +271,8 @@ class GPUTreeLearner: public SerialTreeLearner {
 public:
 #pragma warning(disable : 4702)
   explicit GPUTreeLearner(const TreeConfig* tree_config) : SerialTreeLearner(tree_config) {
-    Log::Fatal("GPU Tree Learner was not enabled in this build. Recompile with CMake option -DUSE_GPU=1");
+    Log::Fatal("GPU Tree Learner was not enabled in this build.\n"
+               "Please recompile with CMake option -DUSE_GPU=1");
   }
 };
......
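The hunk above touches LightGBM's CPU-only fallback: when the library is built without GPU support, GPUTreeLearner is compiled as a stub whose constructor aborts immediately with the message shown. A condensed, illustrative sketch of that compile-time guard pattern (simplified from the real header; the surrounding declarations are omitted, so this is not the verbatim source):

// Compile-time feature guard (sketch; names taken from the diff above).
#ifdef USE_GPU
// ... the real GPU tree learner is compiled here ...
#else
// Stub: keeps the class defined so callers still compile and link,
// but fails fast at runtime with instructions for rebuilding.
class GPUTreeLearner : public SerialTreeLearner {
 public:
  explicit GPUTreeLearner(const TreeConfig* tree_config)
      : SerialTreeLearner(tree_config) {
    Log::Fatal("GPU Tree Learner was not enabled in this build.\n"
               "Please recompile with CMake option -DUSE_GPU=1");
  }
};
#endif  // USE_GPU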
@@ -208,4 +208,3 @@ inline void SyncUpGlobalBestSplit(char* input_buffer_, char* output_buffer_, Spl
 } // namespace LightGBM
 #endif // LightGBM_TREELEARNER_PARALLEL_TREE_LEARNER_H_
-
@@ -215,7 +215,7 @@ Tree* SerialTreeLearner::Train(const score_t* gradients, const score_t *hessians
 #endif
     cur_depth = std::max(cur_depth, tree->leaf_depth(left_leaf));
   }
-  Log::Debug("Trained a tree with leaves=%d and max_depth=%d", tree->num_leaves(), cur_depth);
+  Log::Debug("Trained a tree with leaves = %d and max_depth = %d", tree->num_leaves(), cur_depth);
   return tree.release();
 }
......