Unverified Commit 2027f6b4 authored by Nikita Titov's avatar Nikita Titov Committed by GitHub
Browse files

addressed cpplint error about C-style cast (#2064)

parent 82f803b3
......@@ -259,7 +259,7 @@ class Tree {
}
int_fval = 0;
}
int cat_idx = int(threshold_[node]);
int cat_idx = static_cast<int>(threshold_[node]);
if (Common::FindInBitset(cat_threshold_.data() + cat_boundaries_[cat_idx],
cat_boundaries_[cat_idx + 1] - cat_boundaries_[cat_idx], int_fval)) {
return left_child_[node];
......@@ -268,7 +268,7 @@ class Tree {
}
inline int CategoricalDecisionInner(uint32_t fval, int node) const {
int cat_idx = int(threshold_in_bin_[node]);
int cat_idx = static_cast<int>(threshold_in_bin_[node]);
if (Common::FindInBitset(cat_threshold_inner_.data() + cat_boundaries_inner_[cat_idx],
cat_boundaries_inner_[cat_idx + 1] - cat_boundaries_inner_[cat_idx], fval)) {
return left_child_[node];
......
......@@ -340,7 +340,7 @@ inline static void Uint32ToStr(uint32_t value, char* buffer) {
}
if (value < 10) {
*--buffer = char(value) + '0';
*--buffer = static_cast<char>(value) + '0';
} else {
const unsigned i = value << 1;
*--buffer = kDigitsLut[i + 1];
......
......@@ -238,7 +238,7 @@ bool GBDT::SaveModelToIfElse(int num_iteration, const char* filename) const {
ifs.close();
output_file.close();
return (bool)output_file;
return static_cast<bool>(output_file);
}
std::string GBDT::SaveModelToString(int start_iteration, int num_iteration) const {
......@@ -337,7 +337,7 @@ bool GBDT::SaveModelToFile(int start_iteration, int num_iteration, const char* f
output_file.write(str_to_write.c_str(), str_to_write.size());
output_file.close();
return (bool)output_file;
return static_cast<bool>(output_file);
}
bool GBDT::LoadModelFromString(const char* buffer, size_t len) {
......
......@@ -92,7 +92,7 @@ struct HDFSFile : VirtualFileReader, VirtualFileWriter {
template <typename BufferType>
inline size_t FileOperation(BufferType data, size_t bytes, fileOp<BufferType> op) const {
char* buffer = (char *)data;
char* buffer = reinterpret_cast<char *>(data);
size_t remain = bytes;
while (remain != 0) {
size_t nmax = static_cast<size_t>(std::numeric_limits<tSize>::max());
......
......@@ -439,9 +439,9 @@ struct JsonParser final {
*/
/// Skip whitespace/comments and return the next non-garbage character,
/// or char{0} if the parser has already failed or input is exhausted.
char get_next_token() {
consume_garbage();
// A prior failure poisons the parser: report the NUL sentinel immediately.
if (failed) return char{0};
if (i == str.size())
return fail("Unexpected end of input", char{0});
return str[i++];
}
......
......@@ -342,7 +342,7 @@ std::string Tree::CategoricalDecisionIfElse(int node) const {
} else {
str_buf << "if (std::isnan(fval)) { int_fval = 0; } else { int_fval = static_cast<int>(fval); }";
}
int cat_idx = int(threshold_[node]);
int cat_idx = static_cast<int>(threshold_[node]);
str_buf << "if (int_fval >= 0 && int_fval < 32 * (";
str_buf << cat_boundaries_[cat_idx + 1] - cat_boundaries_[cat_idx];
str_buf << ") && (((cat_threshold[" << cat_boundaries_[cat_idx];
......
......@@ -86,7 +86,7 @@ class CrossEntropyMetric : public Metric {
sum_weights_ = static_cast<double>(num_data_);
} else {
label_t minw;
Common::ObtainMinMaxSum(weights_, num_data_, &minw, (label_t*)nullptr, &sum_weights_);
Common::ObtainMinMaxSum(weights_, num_data_, &minw, static_cast<label_t*>(nullptr), &sum_weights_);
if (minw < 0.0f) {
Log::Fatal("[%s:%s]: (metric) weights not allowed to be negative", GetName()[0].c_str(), __func__);
}
......@@ -177,7 +177,7 @@ class CrossEntropyLambdaMetric : public Metric {
// check all weights are strictly positive; throw error if not
if (weights_ != nullptr) {
label_t minw;
Common::ObtainMinMaxSum(weights_, num_data_, &minw, (label_t*)nullptr, (label_t*)nullptr);
Common::ObtainMinMaxSum(weights_, num_data_, &minw, static_cast<label_t*>(nullptr), static_cast<label_t*>(nullptr));
if (minw <= 0.0f) {
Log::Fatal("[%s:%s]: (metric) all weights must be positive", GetName()[0].c_str(), __func__);
}
......@@ -261,7 +261,7 @@ class KullbackLeiblerDivergence : public Metric {
sum_weights_ = static_cast<double>(num_data_);
} else {
label_t minw;
Common::ObtainMinMaxSum(weights_, num_data_, &minw, (label_t*)nullptr, &sum_weights_);
Common::ObtainMinMaxSum(weights_, num_data_, &minw, static_cast<label_t*>(nullptr), &sum_weights_);
if (minw < 0.0f) {
Log::Fatal("[%s:%s]: (metric) at least one weight is negative", GetName()[0].c_str(), __func__);
}
......
......@@ -173,7 +173,7 @@ class TcpSocket {
PIP_ADAPTER_INFO pAdapter = NULL;
DWORD dwRetVal = 0;
ULONG ulOutBufLen = sizeof(IP_ADAPTER_INFO);
pAdapterInfo = (IP_ADAPTER_INFO *)MALLOC(sizeof(IP_ADAPTER_INFO));
pAdapterInfo = reinterpret_cast<IP_ADAPTER_INFO *>(MALLOC(sizeof(IP_ADAPTER_INFO)));
if (pAdapterInfo == NULL) {
Log::Fatal("GetAdaptersinfo error: allocating memory");
}
......@@ -181,7 +181,7 @@ class TcpSocket {
// the necessary size into the ulOutBufLen variable
if (GetAdaptersInfo(pAdapterInfo, &ulOutBufLen) == ERROR_BUFFER_OVERFLOW) {
FREE(pAdapterInfo);
pAdapterInfo = (IP_ADAPTER_INFO *)MALLOC(ulOutBufLen);
pAdapterInfo = reinterpret_cast<IP_ADAPTER_INFO *>(MALLOC(ulOutBufLen));
if (pAdapterInfo == NULL) {
Log::Fatal("GetAdaptersinfo error: allocating memory");
}
......
......@@ -430,7 +430,7 @@ class RegressionPoissonLoss: public RegressionL2loss {
// Safety check of labels
label_t miny;
double sumy;
Common::ObtainMinMaxSum(label_, num_data_, &miny, (label_t*)nullptr, &sumy);
Common::ObtainMinMaxSum(label_, num_data_, &miny, static_cast<label_t*>(nullptr), &sumy);
if (miny < 0.0f) {
Log::Fatal("[%s]: at least one target label is negative", GetName());
}
......
......@@ -57,7 +57,7 @@ class CrossEntropy: public ObjectiveFunction {
if (weights_ != nullptr) {
label_t minw;
double sumw;
Common::ObtainMinMaxSum(weights_, num_data_, &minw, (label_t*)nullptr, &sumw);
Common::ObtainMinMaxSum(weights_, num_data_, &minw, static_cast<label_t*>(nullptr), &sumw);
if (minw < 0.0f) {
Log::Fatal("[%s]: at least one weight is negative", GetName());
}
......@@ -160,7 +160,7 @@ class CrossEntropyLambda: public ObjectiveFunction {
Log::Info("[%s:%s]: (objective) labels passed interval [0, 1] check", GetName(), __func__);
if (weights_ != nullptr) {
Common::ObtainMinMaxSum(weights_, num_data_, &min_weight_, &max_weight_, (label_t*)nullptr);
Common::ObtainMinMaxSum(weights_, num_data_, &min_weight_, &max_weight_, static_cast<label_t*>(nullptr));
if (min_weight_ <= 0.0f) {
Log::Fatal("[%s]: at least one weight is non-positive", GetName());
}
......
......@@ -103,14 +103,14 @@ int GPUTreeLearner::GetNumWorkgroupsPerFeature(data_size_t leaf_num_data) {
// we roughly want 256 workgroups per device, and we have num_dense_feature4_ feature tuples.
// also guarantee that there are at least 2K examples per workgroup
double x = 256.0 / num_dense_feature4_;
int exp_workgroups_per_feature = (int)ceil(log2(x));
int exp_workgroups_per_feature = static_cast<int>(ceil(log2(x)));
double t = leaf_num_data / 1024.0;
#if GPU_DEBUG >= 4
printf("Computing histogram for %d examples and (%d * %d) feature groups\n", leaf_num_data, dword_features_, num_dense_feature4_);
printf("We can have at most %d workgroups per feature4 for efficiency reasons.\n"
"Best workgroup size per feature for full utilization is %d\n", (int)ceil(t), (1 << exp_workgroups_per_feature));
"Best workgroup size per feature for full utilization is %d\n", static_cast<int>(ceil(t)), (1 << exp_workgroups_per_feature));
#endif
exp_workgroups_per_feature = std::min(exp_workgroups_per_feature, (int)ceil(log((double)t)/log(2.0)));
exp_workgroups_per_feature = std::min(exp_workgroups_per_feature, static_cast<int>(ceil(log(static_cast<double>(t))/log(2.0))));
if (exp_workgroups_per_feature < 0)
exp_workgroups_per_feature = 0;
if (exp_workgroups_per_feature > kMaxLogWorkgroupsPerFeature)
......@@ -188,7 +188,7 @@ void GPUTreeLearner::GPUHistogram(data_size_t leaf_num_data, bool use_all_featur
template <typename HistType>
void GPUTreeLearner::WaitAndGetHistograms(HistogramBinEntry* histograms) {
HistType* hist_outputs = (HistType*) host_histogram_outputs_;
HistType* hist_outputs = reinterpret_cast<HistType*>(host_histogram_outputs_);
// when the output is ready, the computation is done
histograms_wait_obj_.wait();
#pragma omp parallel for schedule(static)
......@@ -325,9 +325,9 @@ void GPUTreeLearner::AllocateGPUMemory() {
if (ordered_bins_[i] == nullptr) {
dense_dword_ind[k] = i;
// decide if we need to redistribute the bin
double t = device_bin_size_ / (double)train_data_->FeatureGroupNumBin(i);
double t = device_bin_size_ / static_cast<double>(train_data_->FeatureGroupNumBin(i));
// multiplier must be a power of 2
device_bin_mults_.push_back((int)round(pow(2, floor(log2(t)))));
device_bin_mults_.push_back(static_cast<int>(round(pow(2, floor(log2(t))))));
// device_bin_mults_.push_back(1);
#if GPU_DEBUG >= 1
printf("feature-group %d using multiplier %d\n", i, device_bin_mults_.back());
......@@ -348,23 +348,23 @@ void GPUTreeLearner::AllocateGPUMemory() {
// for data transfer time
auto start_time = std::chrono::steady_clock::now();
// Now generate new data structure feature4, and copy data to the device
int nthreads = std::min(omp_get_max_threads(), (int)dense_feature_group_map_.size() / dword_features_);
int nthreads = std::min(omp_get_max_threads(), static_cast<int>(dense_feature_group_map_.size()) / dword_features_);
nthreads = std::max(nthreads, 1);
std::vector<Feature4*> host4_vecs(nthreads);
std::vector<boost::compute::buffer> host4_bufs(nthreads);
std::vector<Feature4*> host4_ptrs(nthreads);
// preallocate arrays for all threads, and pin them
for (int i = 0; i < nthreads; ++i) {
host4_vecs[i] = (Feature4*)boost::alignment::aligned_alloc(4096, num_data_ * sizeof(Feature4));
host4_vecs[i] = reinterpret_cast<Feature4*>(boost::alignment::aligned_alloc(4096, num_data_ * sizeof(Feature4)));
host4_bufs[i] = boost::compute::buffer(ctx_, num_data_ * sizeof(Feature4),
boost::compute::memory_object::read_write | boost::compute::memory_object::use_host_ptr,
host4_vecs[i]);
host4_ptrs[i] = (Feature4*)queue_.enqueue_map_buffer(host4_bufs[i], boost::compute::command_queue::map_write_invalidate_region,
0, num_data_ * sizeof(Feature4));
host4_ptrs[i] = reinterpret_cast<Feature4*>(queue_.enqueue_map_buffer(host4_bufs[i], boost::compute::command_queue::map_write_invalidate_region,
0, num_data_ * sizeof(Feature4)));
}
// building Feature4 bundles; each thread handles dword_features_ features
#pragma omp parallel for schedule(static)
for (int i = 0; i < (int)(dense_feature_group_map_.size() / dword_features_); ++i) {
for (int i = 0; i < static_cast<int>(dense_feature_group_map_.size() / dword_features_); ++i) {
int tid = omp_get_thread_num();
Feature4* host4 = host4_ptrs[tid];
auto dense_ind = dense_feature_group_map_.begin() + i * dword_features_;
......@@ -689,9 +689,9 @@ void GPUTreeLearner::InitGPU(int platform_id, int device_id) {
dev_ = boost::compute::system::default_device();
if (platform_id >= 0 && device_id >= 0) {
const std::vector<boost::compute::platform> platforms = boost::compute::system::platforms();
if ((int)platforms.size() > platform_id) {
if (static_cast<int>(platforms.size()) > platform_id) {
const std::vector<boost::compute::device> platform_devices = platforms[platform_id].devices();
if ((int)platform_devices.size() > device_id) {
if (static_cast<int>(platform_devices.size()) > device_id) {
Log::Info("Using requested OpenCL platform %d device %d", platform_id, device_id);
dev_ = platform_devices[device_id];
}
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment