Unverified commit 753b0e9c authored by Belinda Trotta, committed by GitHub

Fix compiler warnings caused by implicit type conversion (fixes #3677) (#3729)

* Fix compiler warnings caused by implicit type conversion

* Fix more warnings

* Fix more warnings
parent 415c0cb5
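
Context for the diff below: the whole commit follows one recipe. Wherever a value silently narrows (double to float, or size_t to int), the conversion is made explicit with static_cast, so compilers stop emitting "possible loss of data"-style conversion warnings (e.g. MSVC C4244/C4267) with no change in behavior. A minimal standalone sketch of the pattern, not taken from the LightGBM sources (implicit_store and explicit_store are illustrative names):

#include <cstddef>
#include <vector>

// Under MSVC /W4 (or GCC/Clang -Wconversion), the implicit double -> float
// narrowing below draws a "possible loss of data" warning.
float implicit_store(const std::vector<double>& values, std::size_t i) {
  float f = values[i];  // warning: implicit double -> float conversion
  return f;
}

// The fix used throughout this diff: spell out the narrowing, which
// documents that the precision loss is intentional and silences the warning.
float explicit_store(const std::vector<double>& values, std::size_t i) {
  float f = static_cast<float>(values[i]);  // explicit, warning-free
  return f;
}
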
@@ -361,7 +361,7 @@ class Dataset {
       if (has_raw_) {
         int feat_ind = numeric_feature_map_[feature_idx];
         if (feat_ind >= 0) {
-          raw_data_[feat_ind][row_idx] = inner_data.second;
+          raw_data_[feat_ind][row_idx] = static_cast<float>(inner_data.second);
         }
       }
     }
@@ -374,7 +374,7 @@ class Dataset {
       if (has_raw_) {
         int feat_ind = numeric_feature_map_[feature_idx];
         if (feat_ind >= 0) {
-          raw_data_[feat_ind][row_idx] = value;
+          raw_data_[feat_ind][row_idx] = static_cast<float>(value);
         }
       }
     }
@@ -1125,7 +1125,7 @@ void DatasetLoader::ConstructBinMappersFromTextData(int rank, int num_machines,
                               Common::Vector2Ptr<double>(&sample_values).data(),
                               Common::VectorSize<int>(sample_indices).data(), static_cast<int>(sample_indices.size()), sample_data.size(), config_);
   if (dataset->has_raw()) {
-    dataset->ResizeRaw(sample_data.size());
+    dataset->ResizeRaw(static_cast<int>(sample_data.size()));
   }
 }
@@ -1163,7 +1163,7 @@ void DatasetLoader::ExtractFeaturesFromMemory(std::vector<std::string>* text_dat
           int sub_feature = dataset->feature2subfeature_[feature_idx];
           dataset->feature_groups_[group]->PushData(tid, sub_feature, i, inner_data.second);
           if (dataset->has_raw()) {
-            feature_row[feature_idx] = inner_data.second;
+            feature_row[feature_idx] = static_cast<float>(inner_data.second);
           }
         } else {
           if (inner_data.first == weight_idx_) {
@@ -1220,7 +1220,7 @@ void DatasetLoader::ExtractFeaturesFromMemory(std::vector<std::string>* text_dat
           int sub_feature = dataset->feature2subfeature_[feature_idx];
           dataset->feature_groups_[group]->PushData(tid, sub_feature, i, inner_data.second);
           if (dataset->has_raw()) {
-            feature_row[feature_idx] = inner_data.second;
+            feature_row[feature_idx] = static_cast<float>(inner_data.second);
           }
         } else {
           if (inner_data.first == weight_idx_) {
@@ -1293,7 +1293,7 @@ void DatasetLoader::ExtractFeaturesFromFile(const char* filename, const Parser*
           int sub_feature = dataset->feature2subfeature_[feature_idx];
           dataset->feature_groups_[group]->PushData(tid, sub_feature, start_idx + i, inner_data.second);
           if (dataset->has_raw()) {
-            feature_row[feature_idx] = inner_data.second;
+            feature_row[feature_idx] = static_cast<float>(inner_data.second);
           }
         } else {
           if (inner_data.first == weight_idx_) {
@@ -379,7 +379,7 @@ std::string Tree::ToString() const {
           << ArrayToString(leaf_const_, num_leaves_) << '\n';
   std::vector<int> num_feat(num_leaves_);
   for (int i = 0; i < num_leaves_; ++i) {
-    num_feat[i] = leaf_coeff_[i].size();
+    num_feat[i] = static_cast<int>(leaf_coeff_[i].size());
   }
   str_buf << "num_features="
           << ArrayToString(num_feat, num_leaves_) << '\n';
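
The Dataset, DatasetLoader, and Tree hunks above also show the second warning class: std::vector::size() returns std::size_t, so storing the result in an int narrows on 64-bit builds. Where the receiving type must stay int (LightGBM keeps these feature counts in std::vector<int>), the commit makes the narrowing explicit. A hedged sketch, with count_features and leaf_coefficients as illustrative names:

#include <cstddef>
#include <vector>

// size() returns std::size_t (a 64-bit unsigned type on most modern
// targets); assigning it to a 32-bit int is an implicit narrowing.
int count_features(const std::vector<double>& leaf_coefficients) {
  // int n = leaf_coefficients.size();          // warning: size_t -> int
  return static_cast<int>(leaf_coefficients.size());  // explicit, no warning
}
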
@@ -201,7 +201,7 @@ void LinearTreeLearner::CalculateLinear(Tree* tree, bool is_refit, const score_t
   std::vector<std::vector<int>> leaf_features;
   std::vector<int> leaf_num_features;
   std::vector<std::vector<const float*>> raw_data_ptr;
-  int max_num_features = 0;
+  size_t max_num_features = 0;
   for (int i = 0; i < num_leaves; ++i) {
     std::vector<int> raw_features;
     if (is_refit) {
@@ -224,8 +224,8 @@ void LinearTreeLearner::CalculateLinear(Tree* tree, bool is_refit, const score_t
     }
     leaf_features.push_back(numerical_features);
     raw_data_ptr.push_back(data_ptr);
-    leaf_num_features.push_back(numerical_features.size());
-    if (static_cast<int>(numerical_features.size()) > max_num_features) {
+    leaf_num_features.push_back(static_cast<int>(numerical_features.size()));
+    if (numerical_features.size() > max_num_features) {
       max_num_features = numerical_features.size();
     }
   }
@@ -233,16 +233,16 @@ void LinearTreeLearner::CalculateLinear(Tree* tree, bool is_refit, const score_t
 #pragma omp parallel for schedule(static)
   for (int i = 0; i < num_threads; ++i) {
     for (int leaf_num = 0; leaf_num < num_leaves; ++leaf_num) {
-      int num_feat = leaf_features[leaf_num].size();
-      std::fill(XTHX_by_thread_[i][leaf_num].begin(), XTHX_by_thread_[i][leaf_num].begin() + (num_feat + 1) * (num_feat + 2) / 2, 0);
-      std::fill(XTg_by_thread_[i][leaf_num].begin(), XTg_by_thread_[i][leaf_num].begin() + num_feat + 1, 0);
+      size_t num_feat = leaf_features[leaf_num].size();
+      std::fill(XTHX_by_thread_[i][leaf_num].begin(), XTHX_by_thread_[i][leaf_num].begin() + (num_feat + 1) * (num_feat + 2) / 2, 0.0f);
+      std::fill(XTg_by_thread_[i][leaf_num].begin(), XTg_by_thread_[i][leaf_num].begin() + num_feat + 1, 0.0f);
     }
   }
 #pragma omp parallel for schedule(static)
   for (int leaf_num = 0; leaf_num < num_leaves; ++leaf_num) {
-    int num_feat = leaf_features[leaf_num].size();
-    std::fill(XTHX_[leaf_num].begin(), XTHX_[leaf_num].begin() + (num_feat + 1) * (num_feat + 2) / 2, 0);
-    std::fill(XTg_[leaf_num].begin(), XTg_[leaf_num].begin() + num_feat + 1, 0);
+    size_t num_feat = leaf_features[leaf_num].size();
+    std::fill(XTHX_[leaf_num].begin(), XTHX_[leaf_num].begin() + (num_feat + 1) * (num_feat + 2) / 2, 0.0f);
+    std::fill(XTg_[leaf_num].begin(), XTg_[leaf_num].begin() + num_feat + 1, 0.0f);
   }
   std::vector<std::vector<int>> num_nonzero;
   for (int i = 0; i < num_threads; ++i) {
@@ -283,11 +283,11 @@ void LinearTreeLearner::CalculateLinear(Tree* tree, bool is_refit, const score_t
       }
     }
     curr_row[num_feat] = 1.0;
-    double h = hessians[i];
-    double g = gradients[i];
+    float h = static_cast<float>(hessians[i]);
+    float g = static_cast<float>(gradients[i]);
     int j = 0;
     for (int feat1 = 0; feat1 < num_feat + 1; ++feat1) {
-      double f1_val = curr_row[feat1];
+      float f1_val = curr_row[feat1];
       XTg_by_thread_[tid][leaf_num][feat1] += f1_val * g;
       f1_val *= h;
       for (int feat2 = feat1; feat2 < num_feat + 1; ++feat2) {
@@ -304,11 +304,11 @@ void LinearTreeLearner::CalculateLinear(Tree* tree, bool is_refit, const score_t
   for (int tid = 0; tid < num_threads; ++tid) {
 #pragma omp parallel for schedule(static)
     for (int leaf_num = 0; leaf_num < num_leaves; ++leaf_num) {
-      int num_feat = leaf_features[leaf_num].size();
-      for (int j = 0; j < (num_feat + 1) * (num_feat + 2) / 2; ++j) {
+      size_t num_feat = leaf_features[leaf_num].size();
+      for (size_t j = 0; j < (num_feat + 1) * (num_feat + 2) / 2; ++j) {
         XTHX_[leaf_num][j] += XTHX_by_thread_[tid][leaf_num][j];
       }
-      for (int feat1 = 0; feat1 < num_feat + 1; ++feat1) {
+      for (size_t feat1 = 0; feat1 < num_feat + 1; ++feat1) {
         XTg_[leaf_num][feat1] += XTg_by_thread_[tid][leaf_num][feat1];
       }
       if (HAS_NAN) {
@@ -337,12 +337,12 @@ void LinearTreeLearner::CalculateLinear(Tree* tree, bool is_refit, const score_t
       }
       continue;
     }
-    int num_feat = leaf_features[leaf_num].size();
+    size_t num_feat = leaf_features[leaf_num].size();
     Eigen::MatrixXd XTHX_mat(num_feat + 1, num_feat + 1);
     Eigen::MatrixXd XTg_mat(num_feat + 1, 1);
-    int j = 0;
-    for (int feat1 = 0; feat1 < num_feat + 1; ++feat1) {
-      for (int feat2 = feat1; feat2 < num_feat + 1; ++feat2) {
+    size_t j = 0;
+    for (size_t feat1 = 0; feat1 < num_feat + 1; ++feat1) {
+      for (size_t feat2 = feat1; feat2 < num_feat + 1; ++feat2) {
         XTHX_mat(feat1, feat2) = XTHX_[leaf_num][j];
         XTHX_mat(feat2, feat1) = XTHX_mat(feat1, feat2);
         if ((feat1 == feat2) && (feat1 < num_feat)) {
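
The CalculateLinear hunks above take the opposite approach where nothing forces an int: counts derived from size() are widened to size_t, loop indices follow so signed/unsigned comparisons disappear, and std::fill over float buffers gets a 0.0f literal instead of the int 0. A minimal sketch of that combination, assuming the buffer is pre-sized to at least num_feat + 1 elements (as the learner's XTg_ buffers are); reset_leaf_buffer is an illustrative name:

#include <cstddef>
#include <vector>

// Illustrative stand-in for resetting one leaf's accumulation buffer.
void reset_leaf_buffer(const std::vector<int>& leaf_features,
                       std::vector<float>* xtg) {
  // size_t matches what size() returns: no narrowing, and the loop index
  // below compares unsigned-to-unsigned, so no sign-compare warnings either.
  std::size_t num_feat = leaf_features.size();
  for (std::size_t j = 0; j < num_feat + 1; ++j) {
    // 0.0f matches the element type exactly; the int literal 0 would be
    // converted implicitly and can trigger conversion warnings (e.g. MSVC).
    (*xtg)[j] = 0.0f;
  }
}

Widening rather than casting looks like the right call here because the same num_feat feeds the triangular-index arithmetic and the Eigen matrix dimensions above, so keeping everything in size_t avoids sprinkling a cast through every expression.
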
@@ -72,7 +72,7 @@ class LinearTreeLearner: public SerialTreeLearner {
       for (int feat : tree->LeafFeaturesInner(leaf_num)) {
         feat_ptr[leaf_num].push_back(train_data_->raw_index(feat));
       }
-      leaf_num_features[leaf_num] = feat_ptr[leaf_num].size();
+      leaf_num_features[leaf_num] = static_cast<int>(feat_ptr[leaf_num].size());
     }
     OMP_INIT_EX();
 #pragma omp parallel for schedule(static) if (num_data_ > 1024)