"demo/vscode:/vscode.git/clone" did not exist on "17d316f31677ca1ae9f5c83600500cec88514445"
Unverified Commit 952458a9 authored by Ilya Chernov's avatar Ilya Chernov Committed by GitHub
Browse files

Remove redundant whitespaces (#5480)

remove redundant whitespaces
parent 3d4e08e1
...@@ -508,7 +508,7 @@ namespace LightGBM { ...@@ -508,7 +508,7 @@ namespace LightGBM {
const double max_sparse_rate = const double max_sparse_rate =
static_cast<double>(cnt_in_bin[most_freq_bin_]) / total_sample_cnt; static_cast<double>(cnt_in_bin[most_freq_bin_]) / total_sample_cnt;
// When most_freq_bin_ != default_bin_, there are some additional data loading costs. // When most_freq_bin_ != default_bin_, there are some additional data loading costs.
// so use most_freq_bin_ = default_bin_ when there is not so sparse // so use most_freq_bin_ = default_bin_ when there is not so sparse
if (most_freq_bin_ != default_bin_ && max_sparse_rate < kSparseThreshold) { if (most_freq_bin_ != default_bin_ && max_sparse_rate < kSparseThreshold) {
most_freq_bin_ = default_bin_; most_freq_bin_ = default_bin_;
} }
...@@ -705,7 +705,7 @@ namespace LightGBM { ...@@ -705,7 +705,7 @@ namespace LightGBM {
return new MultiValSparseBin<uint32_t, uint32_t>( return new MultiValSparseBin<uint32_t, uint32_t>(
num_data, num_bin, estimate_element_per_row); num_data, num_bin, estimate_element_per_row);
} }
} else { } else {
if (num_bin <= 256) { if (num_bin <= 256) {
return new MultiValSparseBin<size_t, uint8_t>( return new MultiValSparseBin<size_t, uint8_t>(
num_data, num_bin, estimate_element_per_row); num_data, num_bin, estimate_element_per_row);
......
...@@ -248,7 +248,7 @@ class TcpSocket { ...@@ -248,7 +248,7 @@ class TcpSocket {
} }
inline bool Connect(const char *url, int port) { inline bool Connect(const char *url, int port) {
sockaddr_in server_addr = GetAddress(url, port); sockaddr_in server_addr = GetAddress(url, port);
if (connect(sockfd_, reinterpret_cast<const sockaddr*>(&server_addr), sizeof(sockaddr_in)) == 0) { if (connect(sockfd_, reinterpret_cast<const sockaddr*>(&server_addr), sizeof(sockaddr_in)) == 0) {
return true; return true;
} }
......
...@@ -129,7 +129,7 @@ class BinaryLogloss: public ObjectiveFunction { ...@@ -129,7 +129,7 @@ class BinaryLogloss: public ObjectiveFunction {
// calculate gradients and hessians // calculate gradients and hessians
const double response = -label * sigmoid_ / (1.0f + std::exp(label * sigmoid_ * score[i])); const double response = -label * sigmoid_ / (1.0f + std::exp(label * sigmoid_ * score[i]));
const double abs_response = fabs(response); const double abs_response = fabs(response);
gradients[i] = static_cast<score_t>(response * label_weight * weights_[i]); gradients[i] = static_cast<score_t>(response * label_weight * weights_[i]);
hessians[i] = static_cast<score_t>(abs_response * (sigmoid_ - abs_response) * label_weight * weights_[i]); hessians[i] = static_cast<score_t>(abs_response * (sigmoid_ - abs_response) * label_weight * weights_[i]);
} }
} }
......
...@@ -346,7 +346,7 @@ class CUDADataPartition { ...@@ -346,7 +346,7 @@ class CUDADataPartition {
data_size_t* cuda_data_indices_; data_size_t* cuda_data_indices_;
/*! \brief start position of each leaf in cuda_data_indices_ */ /*! \brief start position of each leaf in cuda_data_indices_ */
data_size_t* cuda_leaf_data_start_; data_size_t* cuda_leaf_data_start_;
/*! \brief end position of each leaf in cuda_data_indices_ */ /*! \brief end position of each leaf in cuda_data_indices_ */
data_size_t* cuda_leaf_data_end_; data_size_t* cuda_leaf_data_end_;
/*! \brief number of data in each leaf */ /*! \brief number of data in each leaf */
data_size_t* cuda_leaf_num_data_; data_size_t* cuda_leaf_num_data_;
......
...@@ -266,7 +266,7 @@ void GPUTreeLearner::AllocateGPUMemory() { ...@@ -266,7 +266,7 @@ void GPUTreeLearner::AllocateGPUMemory() {
ptr_pinned_gradients_ = queue_.enqueue_map_buffer(pinned_gradients_, boost::compute::command_queue::map_write_invalidate_region, ptr_pinned_gradients_ = queue_.enqueue_map_buffer(pinned_gradients_, boost::compute::command_queue::map_write_invalidate_region,
0, allocated_num_data_ * sizeof(score_t)); 0, allocated_num_data_ * sizeof(score_t));
pinned_hessians_ = boost::compute::buffer(); // deallocate pinned_hessians_ = boost::compute::buffer(); // deallocate
pinned_hessians_ = boost::compute::buffer(ctx_, allocated_num_data_ * sizeof(score_t), pinned_hessians_ = boost::compute::buffer(ctx_, allocated_num_data_ * sizeof(score_t),
boost::compute::memory_object::read_write | boost::compute::memory_object::use_host_ptr, boost::compute::memory_object::read_write | boost::compute::memory_object::use_host_ptr,
ordered_hessians_.data()); ordered_hessians_.data());
ptr_pinned_hessians_ = queue_.enqueue_map_buffer(pinned_hessians_, boost::compute::command_queue::map_write_invalidate_region, ptr_pinned_hessians_ = queue_.enqueue_map_buffer(pinned_hessians_, boost::compute::command_queue::map_write_invalidate_region,
...@@ -277,7 +277,7 @@ void GPUTreeLearner::AllocateGPUMemory() { ...@@ -277,7 +277,7 @@ void GPUTreeLearner::AllocateGPUMemory() {
device_gradients_ = boost::compute::buffer(ctx_, allocated_num_data_ * sizeof(score_t), device_gradients_ = boost::compute::buffer(ctx_, allocated_num_data_ * sizeof(score_t),
boost::compute::memory_object::read_only, nullptr); boost::compute::memory_object::read_only, nullptr);
device_hessians_ = boost::compute::buffer(); // deallocate device_hessians_ = boost::compute::buffer(); // deallocate
device_hessians_ = boost::compute::buffer(ctx_, allocated_num_data_ * sizeof(score_t), device_hessians_ = boost::compute::buffer(ctx_, allocated_num_data_ * sizeof(score_t),
boost::compute::memory_object::read_only, nullptr); boost::compute::memory_object::read_only, nullptr);
// allocate feature mask, for disabling some feature-groups' histogram calculation // allocate feature mask, for disabling some feature-groups' histogram calculation
feature_masks_.resize(num_dense_feature4_ * dword_features_); feature_masks_.resize(num_dense_feature4_ * dword_features_);
......
...@@ -875,7 +875,7 @@ class AdvancedLeafConstraints : public IntermediateLeafConstraints { ...@@ -875,7 +875,7 @@ class AdvancedLeafConstraints : public IntermediateLeafConstraints {
// for example when adding a constraints cstr2 on thresholds [1:2), // for example when adding a constraints cstr2 on thresholds [1:2),
// on an existing constraints cstr1 on thresholds [0, +inf), // on an existing constraints cstr1 on thresholds [0, +inf),
// the thresholds and constraints must become // the thresholds and constraints must become
// [0, 1, 2] and [cstr1, cstr2, cstr1] // [0, 1, 2] and [cstr1, cstr2, cstr1]
// so since we loop through thresholds only once, // so since we loop through thresholds only once,
// the previous constraint that still applies needs to be recorded // the previous constraint that still applies needs to be recorded
double previous_constraint = use_max_operator double previous_constraint = use_max_operator
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment