Commit 172caee1 authored by Guolin Ke's avatar Guolin Ke Committed by Nikita Titov
Browse files

fix warnings on Windows (#1711)

parent f53116af
...@@ -47,7 +47,7 @@ public: ...@@ -47,7 +47,7 @@ public:
tmp_grad_.resize(num_data_); tmp_grad_.resize(num_data_);
tmp_hess_.resize(num_data_); tmp_hess_.resize(num_data_);
} }
tmp_score_.resize(num_data_, 0.0f); tmp_score_.resize(num_data_, 0.0);
} }
void ResetConfig(const Config* config) override { void ResetConfig(const Config* config) override {
...@@ -74,31 +74,31 @@ public: ...@@ -74,31 +74,31 @@ public:
tmp_grad_.resize(num_data_); tmp_grad_.resize(num_data_);
tmp_hess_.resize(num_data_); tmp_hess_.resize(num_data_);
} }
tmp_score_.resize(num_data_, 0.0f); tmp_score_.resize(num_data_, 0.0);
} }
void GetRFTargets(const Dataset* train_data) { void GetRFTargets(const Dataset* train_data) {
auto label_ptr = train_data->metadata().label(); auto label_ptr = train_data->metadata().label();
std::fill(hessians_.begin(), hessians_.end(), 1); std::fill(hessians_.begin(), hessians_.end(), 1.0f);
if (num_tree_per_iteration_ == 1) { if (num_tree_per_iteration_ == 1) {
OMP_INIT_EX(); OMP_INIT_EX();
#pragma omp parallel for schedule(static,1) #pragma omp parallel for schedule(static,1)
for (data_size_t i = 0; i < train_data->num_data(); ++i) { for (data_size_t i = 0; i < train_data->num_data(); ++i) {
OMP_LOOP_EX_BEGIN(); OMP_LOOP_EX_BEGIN();
double label = label_ptr[i]; score_t label = label_ptr[i];
gradients_[i] = static_cast<score_t>(-label); gradients_[i] = static_cast<score_t>(-label);
OMP_LOOP_EX_END(); OMP_LOOP_EX_END();
} }
OMP_THROW_EX(); OMP_THROW_EX();
} }
else { else {
std::fill(gradients_.begin(), gradients_.end(), 0); std::fill(gradients_.begin(), gradients_.end(), 0.0f);
OMP_INIT_EX(); OMP_INIT_EX();
#pragma omp parallel for schedule(static,1) #pragma omp parallel for schedule(static,1)
for (data_size_t i = 0; i < train_data->num_data(); ++i) { for (data_size_t i = 0; i < train_data->num_data(); ++i) {
OMP_LOOP_EX_BEGIN(); OMP_LOOP_EX_BEGIN();
double label = label_ptr[i]; score_t label = label_ptr[i];
gradients_[i + static_cast<int>(label) * num_data_] = -1; gradients_[i + static_cast<int>(label) * num_data_] = -1.0f;
OMP_LOOP_EX_END(); OMP_LOOP_EX_END();
} }
OMP_THROW_EX(); OMP_THROW_EX();
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment