Unverified Commit 0df10887 authored by James Lamb's avatar James Lamb Committed by GitHub
Browse files

[ci] [c++] use 'pre-commit' to run 'cpplint', upgrade to 'cpplint' 2.0.2 (#7002)

* [ci] [c++] use 'pre-commit' to run 'cpplint', upgrade to 'cpplint' 2.0.2

* remove bashisms

* one more pipefail use

* another pipefail
parent d9bdda6c
......@@ -100,7 +100,9 @@ class ColSampler {
allowed_features.insert(constraint.begin(), constraint.end());
}
for (int feat : branch_features) {
if (constraint.count(feat) == 0) { break; }
if (constraint.count(feat) == 0) {
break;
}
++num_feat_found;
if (num_feat_found == static_cast<int>(branch_features.size())) {
allowed_features.insert(constraint.begin(), constraint.end());
......
......@@ -128,7 +128,9 @@ void DataParallelTreeLearner<TREELEARNER_T>::BeforeTrain() {
std::vector<int> num_bins_distributed(num_machines_, 0);
for (int i = 0; i < this->train_data_->num_total_features(); ++i) {
int inner_feature_index = this->train_data_->InnerFeatureIndex(i);
if (inner_feature_index == -1) { continue; }
if (inner_feature_index == -1) {
continue;
}
if (this->col_sampler_.is_feature_used_bytree()[inner_feature_index]) {
int cur_min_machine = static_cast<int>(ArrayArgs<int>::ArgMin(num_bins_distributed));
feature_distribution[cur_min_machine].push_back(inner_feature_index);
......
......@@ -42,7 +42,9 @@ void FeatureParallelTreeLearner<TREELEARNER_T>::BeforeTrain() {
std::vector<int> num_bins_distributed(num_machines_, 0);
for (int i = 0; i < this->train_data_->num_total_features(); ++i) {
int inner_feature_index = this->train_data_->InnerFeatureIndex(i);
if (inner_feature_index == -1) { continue; }
if (inner_feature_index == -1) {
continue;
}
if (this->col_sampler_.is_feature_used_bytree()[inner_feature_index]) {
int cur_min_machine = static_cast<int>(ArrayArgs<int>::ArgMin(num_bins_distributed));
feature_distribution[cur_min_machine].push_back(inner_feature_index);
......
......@@ -245,7 +245,7 @@ void GPUTreeLearner::AllocateGPUMemory() {
}
// allocate memory for all features (FIXME: 4 GB barrier on some devices, need to split to multiple buffers)
device_features_.reset();
device_features_ = std::unique_ptr<boost::compute::vector<Feature4>>(new boost::compute::vector<Feature4>((uint64_t)num_dense_feature4_ * num_data_, ctx_));
device_features_ = std::unique_ptr<boost::compute::vector<Feature4>>(new boost::compute::vector<Feature4>(static_cast<uint64_t>(num_dense_feature4_) * num_data_, ctx_));
// unpin old buffer if necessary before destructing them
if (ptr_pinned_gradients_) {
queue_.enqueue_unmap_buffer(pinned_gradients_, ptr_pinned_gradients_);
......@@ -427,7 +427,7 @@ void GPUTreeLearner::AllocateGPUMemory() {
}
#pragma omp critical
queue_.enqueue_write_buffer(device_features_->get_buffer(),
(uint64_t)i * num_data_ * sizeof(Feature4), num_data_ * sizeof(Feature4), host4);
static_cast<uint64_t>(i) * num_data_ * sizeof(Feature4), num_data_ * sizeof(Feature4), host4);
#if GPU_DEBUG >= 1
printf("first example of feature-group tuple is: %d %d %d %d\n", host4[0].s[0], host4[0].s[1], host4[0].s[2], host4[0].s[3]);
printf("Feature-groups copied to device with multipliers ");
......@@ -503,7 +503,7 @@ void GPUTreeLearner::AllocateGPUMemory() {
}
// copying the last 1 to (dword_features - 1) feature-groups in the last tuple
queue_.enqueue_write_buffer(device_features_->get_buffer(),
(num_dense_feature4_ - 1) * (uint64_t)num_data_ * sizeof(Feature4), num_data_ * sizeof(Feature4), host4);
(num_dense_feature4_ - 1) * static_cast<uint64_t>(num_data_) * sizeof(Feature4), num_data_ * sizeof(Feature4), host4);
#if GPU_DEBUG >= 1
printf("Last features copied to device\n");
#endif
......@@ -1089,7 +1089,9 @@ void GPUTreeLearner::FindBestSplits(const Tree* tree) {
size_t bin_size = train_data_->FeatureNumBin(feature_index) + 1;
printf("Feature %d smaller leaf:\n", feature_index);
PrintHistograms(smaller_leaf_histogram_array_[feature_index].RawData() - kHistOffset, bin_size);
if (larger_leaf_splits_ == nullptr || larger_leaf_splits_->leaf_index() < 0) { continue; }
if (larger_leaf_splits_ == nullptr || larger_leaf_splits_->leaf_index() < 0) {
continue;
}
printf("Feature %d larger leaf:\n", feature_index);
PrintHistograms(larger_leaf_histogram_array_[feature_index].RawData() - kHistOffset, bin_size);
}
......
......@@ -368,12 +368,16 @@ bool SerialTreeLearner::BeforeFindBestSplit(const Tree* tree, int left_leaf, int
larger_leaf_histogram_array_ = nullptr;
} else if (num_data_in_left_child < num_data_in_right_child) {
// put parent(left) leaf's histograms into larger leaf's histograms
if (histogram_pool_.Get(left_leaf, &larger_leaf_histogram_array_)) { parent_leaf_histogram_array_ = larger_leaf_histogram_array_; }
if (histogram_pool_.Get(left_leaf, &larger_leaf_histogram_array_)) {
parent_leaf_histogram_array_ = larger_leaf_histogram_array_;
}
histogram_pool_.Move(left_leaf, right_leaf);
histogram_pool_.Get(left_leaf, &smaller_leaf_histogram_array_);
} else {
// put parent(left) leaf's histograms to larger leaf's histograms
if (histogram_pool_.Get(left_leaf, &larger_leaf_histogram_array_)) { parent_leaf_histogram_array_ = larger_leaf_histogram_array_; }
if (histogram_pool_.Get(left_leaf, &larger_leaf_histogram_array_)) {
parent_leaf_histogram_array_ = larger_leaf_histogram_array_;
}
histogram_pool_.Get(right_leaf, &smaller_leaf_histogram_array_);
}
return true;
......
......@@ -268,7 +268,9 @@ void VotingParallelTreeLearner<TREELEARNER_T>::FindBestSplits(const Tree* tree)
#pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(static)
for (int feature_index = 0; feature_index < this->num_features_; ++feature_index) {
OMP_LOOP_EX_BEGIN();
if (!is_feature_used[feature_index]) { continue; }
if (!is_feature_used[feature_index]) {
continue;
}
const BinMapper* feature_bin_mapper = this->train_data_->FeatureBinMapper(feature_index);
const int num_bin = feature_bin_mapper->num_bin();
const int offset = static_cast<int>(feature_bin_mapper->GetMostFreqBin() == 0);
......@@ -288,7 +290,9 @@ void VotingParallelTreeLearner<TREELEARNER_T>::FindBestSplits(const Tree* tree)
#pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(static)
for (int feature_index = 0; feature_index < this->num_features_; ++feature_index) {
OMP_LOOP_EX_BEGIN();
if (!is_feature_used[feature_index]) { continue; }
if (!is_feature_used[feature_index]) {
continue;
}
const BinMapper* feature_bin_mapper = this->train_data_->FeatureBinMapper(feature_index);
const int num_bin = feature_bin_mapper->num_bin();
const int offset = static_cast<int>(feature_bin_mapper->GetMostFreqBin() == 0);
......@@ -310,7 +314,9 @@ void VotingParallelTreeLearner<TREELEARNER_T>::FindBestSplits(const Tree* tree)
#pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(static)
for (int feature_index = 0; feature_index < this->num_features_; ++feature_index) {
OMP_LOOP_EX_BEGIN();
if (!is_feature_used[feature_index]) { continue; }
if (!is_feature_used[feature_index]) {
continue;
}
const int real_feature_index = this->train_data_->RealFeatureIndex(feature_index);
this->train_data_->FixHistogram(feature_index,
this->smaller_leaf_splits_->sum_gradients(), this->smaller_leaf_splits_->sum_hessians(),
......@@ -323,7 +329,9 @@ void VotingParallelTreeLearner<TREELEARNER_T>::FindBestSplits(const Tree* tree)
&smaller_bestsplit_per_features[feature_index],
smaller_leaf_parent_output);
// only has root leaf
if (this->larger_leaf_splits_ == nullptr || this->larger_leaf_splits_->leaf_index() < 0) { continue; }
if (this->larger_leaf_splits_ == nullptr || this->larger_leaf_splits_->leaf_index() < 0) {
continue;
}
if (use_subtract) {
this->larger_leaf_histogram_array_[feature_index].Subtract(this->smaller_leaf_histogram_array_[feature_index]);
......
Markdown is supported
Attach a file by drag & drop or click to upload.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment