Commit d6ef6cfa authored by Guolin Ke's avatar Guolin Ke
Browse files

Fixed some signed/unsigned comparison warnings

parent 68f4547b
...@@ -411,7 +411,7 @@ ColumnFunctionFromCSC(const void* col_ptr, int col_ptr_type, const int32_t* indi ...@@ -411,7 +411,7 @@ ColumnFunctionFromCSC(const void* col_ptr, int col_ptr_type, const int32_t* indi
const void* data, int data_type, int64_t ncol_ptr, int64_t nelem); const void* data, int data_type, int64_t ncol_ptr, int64_t nelem);
std::vector<double> std::vector<double>
SampleFromOneColumn(const std::vector<std::pair<int, double>>& data, const std::vector<size_t>& indices); SampleFromOneColumn(const std::vector<std::pair<int, double>>& data, const std::vector<int>& indices);
// exception handle and error msg // exception handle and error msg
......
...@@ -52,12 +52,12 @@ public: ...@@ -52,12 +52,12 @@ public:
* \param K * \param K
* \return K Ordered sampled data from {0,1,...,N-1} * \return K Ordered sampled data from {0,1,...,N-1}
*/ */
inline std::vector<size_t> Sample(size_t N, size_t K) { inline std::vector<int> Sample(int N, int K) {
std::vector<size_t> ret; std::vector<int> ret;
if (K > N || K < 0) { if (K > N || K < 0) {
return ret; return ret;
} }
for (size_t i = 0; i < N; ++i) { for (int i = 0; i < N; ++i) {
double prob = (K - ret.size()) / static_cast<double>(N - i); double prob = (K - ret.size()) / static_cast<double>(N - i);
if (NextDouble() < prob) { if (NextDouble() < prob) {
ret.push_back(i); ret.push_back(i);
......
...@@ -88,7 +88,7 @@ private: ...@@ -88,7 +88,7 @@ private:
// select dropping tree indexes based on drop_rate // select dropping tree indexes based on drop_rate
// if drop rate is too small, skip this step, drop one tree randomly // if drop rate is too small, skip this step, drop one tree randomly
if (drop_rate_ > kEpsilon) { if (drop_rate_ > kEpsilon) {
for (size_t i = 0; i < static_cast<size_t>(iter_); ++i) { for (int i = 0; i < iter_; ++i) {
if (random_for_drop_.NextDouble() < drop_rate_) { if (random_for_drop_.NextDouble() < drop_rate_) {
drop_index_.push_back(i); drop_index_.push_back(i);
} }
...@@ -128,7 +128,7 @@ private: ...@@ -128,7 +128,7 @@ private:
} }
} }
/*! \brief The indexes of dropping trees */ /*! \brief The indexes of dropping trees */
std::vector<size_t> drop_index_; std::vector<int> drop_index_;
/*! \brief Dropping rate */ /*! \brief Dropping rate */
double drop_rate_; double drop_rate_;
/*! \brief Random generator, used to select dropping trees */ /*! \brief Random generator, used to select dropping trees */
......
...@@ -199,7 +199,7 @@ DllExport int LGBM_CreateDatasetFromMat(const void* data, ...@@ -199,7 +199,7 @@ DllExport int LGBM_CreateDatasetFromMat(const void* data,
if (reference == nullptr) { if (reference == nullptr) {
// sample data first // sample data first
Random rand(config.io_config.data_random_seed); Random rand(config.io_config.data_random_seed);
const size_t sample_cnt = static_cast<size_t>(nrow < config.io_config.bin_construct_sample_cnt ? nrow : config.io_config.bin_construct_sample_cnt); const int sample_cnt = static_cast<int>(nrow < config.io_config.bin_construct_sample_cnt ? nrow : config.io_config.bin_construct_sample_cnt);
auto sample_indices = rand.Sample(nrow, sample_cnt); auto sample_indices = rand.Sample(nrow, sample_cnt);
std::vector<std::vector<double>> sample_values(ncol); std::vector<std::vector<double>> sample_values(ncol);
for (size_t i = 0; i < sample_indices.size(); ++i) { for (size_t i = 0; i < sample_indices.size(); ++i) {
...@@ -251,7 +251,7 @@ DllExport int LGBM_CreateDatasetFromCSR(const void* indptr, ...@@ -251,7 +251,7 @@ DllExport int LGBM_CreateDatasetFromCSR(const void* indptr,
if (reference == nullptr) { if (reference == nullptr) {
// sample data first // sample data first
Random rand(config.io_config.data_random_seed); Random rand(config.io_config.data_random_seed);
const size_t sample_cnt = static_cast<size_t>(nrow < config.io_config.bin_construct_sample_cnt ? nrow : config.io_config.bin_construct_sample_cnt); const int sample_cnt = static_cast<int>(nrow < config.io_config.bin_construct_sample_cnt ? nrow : config.io_config.bin_construct_sample_cnt);
auto sample_indices = rand.Sample(nrow, sample_cnt); auto sample_indices = rand.Sample(nrow, sample_cnt);
std::vector<std::vector<double>> sample_values; std::vector<std::vector<double>> sample_values;
for (size_t i = 0; i < sample_indices.size(); ++i) { for (size_t i = 0; i < sample_indices.size(); ++i) {
...@@ -313,7 +313,7 @@ DllExport int LGBM_CreateDatasetFromCSC(const void* col_ptr, ...@@ -313,7 +313,7 @@ DllExport int LGBM_CreateDatasetFromCSC(const void* col_ptr,
Log::Warning("Construct from CSC format is not efficient"); Log::Warning("Construct from CSC format is not efficient");
// sample data first // sample data first
Random rand(config.io_config.data_random_seed); Random rand(config.io_config.data_random_seed);
const size_t sample_cnt = static_cast<size_t>(nrow < config.io_config.bin_construct_sample_cnt ? nrow : config.io_config.bin_construct_sample_cnt); const int sample_cnt = static_cast<int>(nrow < config.io_config.bin_construct_sample_cnt ? nrow : config.io_config.bin_construct_sample_cnt);
auto sample_indices = rand.Sample(nrow, sample_cnt); auto sample_indices = rand.Sample(nrow, sample_cnt);
std::vector<std::vector<double>> sample_values(ncol_ptr - 1); std::vector<std::vector<double>> sample_values(ncol_ptr - 1);
#pragma omp parallel for schedule(guided) #pragma omp parallel for schedule(guided)
...@@ -762,7 +762,7 @@ ColumnFunctionFromCSC(const void* col_ptr, int col_ptr_type, const int32_t* indi ...@@ -762,7 +762,7 @@ ColumnFunctionFromCSC(const void* col_ptr, int col_ptr_type, const int32_t* indi
throw std::runtime_error("unknown data type in ColumnFunctionFromCSC"); throw std::runtime_error("unknown data type in ColumnFunctionFromCSC");
} }
std::vector<double> SampleFromOneColumn(const std::vector<std::pair<int, double>>& data, const std::vector<size_t>& indices) { std::vector<double> SampleFromOneColumn(const std::vector<std::pair<int, double>>& data, const std::vector<int>& indices) {
size_t j = 0; size_t j = 0;
std::vector<double> ret; std::vector<double> ret;
for (auto row_idx : indices) { for (auto row_idx : indices) {
......
...@@ -512,11 +512,11 @@ std::vector<std::string> DatasetLoader::LoadTextDataToMemory(const char* filenam ...@@ -512,11 +512,11 @@ std::vector<std::string> DatasetLoader::LoadTextDataToMemory(const char* filenam
} }
std::vector<std::string> DatasetLoader::SampleTextDataFromMemory(const std::vector<std::string>& data) { std::vector<std::string> DatasetLoader::SampleTextDataFromMemory(const std::vector<std::string>& data) {
size_t sample_cnt = static_cast<size_t>(io_config_.bin_construct_sample_cnt); int sample_cnt = io_config_.bin_construct_sample_cnt;
if (sample_cnt > data.size()) { if (static_cast<size_t>(sample_cnt) > data.size()) {
sample_cnt = data.size(); sample_cnt = static_cast<int>(data.size());
} }
std::vector<size_t> sample_indices = random_.Sample(data.size(), sample_cnt); auto sample_indices = random_.Sample(static_cast<int>(data.size()), sample_cnt);
std::vector<std::string> out(sample_indices.size()); std::vector<std::string> out(sample_indices.size());
for (size_t i = 0; i < sample_indices.size(); ++i) { for (size_t i = 0; i < sample_indices.size(); ++i) {
const size_t idx = sample_indices[i]; const size_t idx = sample_indices[i];
......
...@@ -38,7 +38,7 @@ score_t DCGCalculator::CalMaxDCGAtK(data_size_t k, const float* label, data_size ...@@ -38,7 +38,7 @@ score_t DCGCalculator::CalMaxDCGAtK(data_size_t k, const float* label, data_size
for (data_size_t i = 0; i < num_data; ++i) { for (data_size_t i = 0; i < num_data; ++i) {
++label_cnt[static_cast<int>(label[i])]; ++label_cnt[static_cast<int>(label[i])];
} }
size_t top_label = label_gain_.size() - 1; int top_label = static_cast<int>(label_gain_.size()) - 1;
if (k > num_data) { k = num_data; } if (k > num_data) { k = num_data; }
// start from top label, and accumulate DCG // start from top label, and accumulate DCG
...@@ -67,7 +67,7 @@ void DCGCalculator::CalMaxDCG(const std::vector<data_size_t>& ks, ...@@ -67,7 +67,7 @@ void DCGCalculator::CalMaxDCG(const std::vector<data_size_t>& ks,
} }
score_t cur_result = 0.0f; score_t cur_result = 0.0f;
data_size_t cur_left = 0; data_size_t cur_left = 0;
size_t top_label = label_gain_.size() - 1; int top_label = static_cast<int>(label_gain_.size()) - 1;
// calculate k Max DCG by one pass // calculate k Max DCG by one pass
for (size_t i = 0; i < ks.size(); ++i) { for (size_t i = 0; i < ks.size(); ++i) {
data_size_t cur_k = ks[i]; data_size_t cur_k = ks[i];
......
...@@ -141,8 +141,8 @@ void SerialTreeLearner::BeforeTrain() { ...@@ -141,8 +141,8 @@ void SerialTreeLearner::BeforeTrain() {
is_feature_used_[i] = false; is_feature_used_[i] = false;
} }
// Get used feature at current tree // Get used feature at current tree
size_t used_feature_cnt = static_cast<size_t>(num_features_*feature_fraction_); int used_feature_cnt = static_cast<int>(num_features_*feature_fraction_);
std::vector<size_t> used_feature_indices = random_.Sample(num_features_, used_feature_cnt); auto used_feature_indices = random_.Sample(num_features_, used_feature_cnt);
for (auto idx : used_feature_indices) { for (auto idx : used_feature_indices) {
is_feature_used_[idx] = true; is_feature_used_[idx] = true;
} }
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment