Commit d6ef6cfa authored by Guolin Ke
Browse files

some signed unsigned warnings fixed

parent 68f4547b
......@@ -411,7 +411,7 @@ ColumnFunctionFromCSC(const void* col_ptr, int col_ptr_type, const int32_t* indi
const void* data, int data_type, int64_t ncol_ptr, int64_t nelem);
std::vector<double>
SampleFromOneColumn(const std::vector<std::pair<int, double>>& data, const std::vector<size_t>& indices);
SampleFromOneColumn(const std::vector<std::pair<int, double>>& data, const std::vector<int>& indices);
// exception handle and error msg
......
......@@ -52,12 +52,12 @@ public:
* \param K
* \return K Ordered sampled data from {0,1,...,N-1}
*/
inline std::vector<size_t> Sample(size_t N, size_t K) {
std::vector<size_t> ret;
inline std::vector<int> Sample(int N, int K) {
std::vector<int> ret;
if (K > N || K < 0) {
return ret;
}
for (size_t i = 0; i < N; ++i) {
for (int i = 0; i < N; ++i) {
double prob = (K - ret.size()) / static_cast<double>(N - i);
if (NextDouble() < prob) {
ret.push_back(i);
......
......@@ -88,7 +88,7 @@ private:
// select dropping tree indexes based on drop_rate
// if drop rate is too small, skip this step, drop one tree randomly
if (drop_rate_ > kEpsilon) {
for (size_t i = 0; i < static_cast<size_t>(iter_); ++i) {
for (int i = 0; i < iter_; ++i) {
if (random_for_drop_.NextDouble() < drop_rate_) {
drop_index_.push_back(i);
}
......@@ -128,7 +128,7 @@ private:
}
}
/*! \brief The indexes of dropping trees */
std::vector<size_t> drop_index_;
std::vector<int> drop_index_;
/*! \brief Dropping rate */
double drop_rate_;
/*! \brief Random generator, used to select dropping trees */
......
......@@ -199,7 +199,7 @@ DllExport int LGBM_CreateDatasetFromMat(const void* data,
if (reference == nullptr) {
// sample data first
Random rand(config.io_config.data_random_seed);
const size_t sample_cnt = static_cast<size_t>(nrow < config.io_config.bin_construct_sample_cnt ? nrow : config.io_config.bin_construct_sample_cnt);
const int sample_cnt = static_cast<int>(nrow < config.io_config.bin_construct_sample_cnt ? nrow : config.io_config.bin_construct_sample_cnt);
auto sample_indices = rand.Sample(nrow, sample_cnt);
std::vector<std::vector<double>> sample_values(ncol);
for (size_t i = 0; i < sample_indices.size(); ++i) {
......@@ -251,7 +251,7 @@ DllExport int LGBM_CreateDatasetFromCSR(const void* indptr,
if (reference == nullptr) {
// sample data first
Random rand(config.io_config.data_random_seed);
const size_t sample_cnt = static_cast<size_t>(nrow < config.io_config.bin_construct_sample_cnt ? nrow : config.io_config.bin_construct_sample_cnt);
const int sample_cnt = static_cast<int>(nrow < config.io_config.bin_construct_sample_cnt ? nrow : config.io_config.bin_construct_sample_cnt);
auto sample_indices = rand.Sample(nrow, sample_cnt);
std::vector<std::vector<double>> sample_values;
for (size_t i = 0; i < sample_indices.size(); ++i) {
......@@ -313,7 +313,7 @@ DllExport int LGBM_CreateDatasetFromCSC(const void* col_ptr,
Log::Warning("Construct from CSC format is not efficient");
// sample data first
Random rand(config.io_config.data_random_seed);
const size_t sample_cnt = static_cast<size_t>(nrow < config.io_config.bin_construct_sample_cnt ? nrow : config.io_config.bin_construct_sample_cnt);
const int sample_cnt = static_cast<int>(nrow < config.io_config.bin_construct_sample_cnt ? nrow : config.io_config.bin_construct_sample_cnt);
auto sample_indices = rand.Sample(nrow, sample_cnt);
std::vector<std::vector<double>> sample_values(ncol_ptr - 1);
#pragma omp parallel for schedule(guided)
......@@ -762,7 +762,7 @@ ColumnFunctionFromCSC(const void* col_ptr, int col_ptr_type, const int32_t* indi
throw std::runtime_error("unknown data type in ColumnFunctionFromCSC");
}
std::vector<double> SampleFromOneColumn(const std::vector<std::pair<int, double>>& data, const std::vector<size_t>& indices) {
std::vector<double> SampleFromOneColumn(const std::vector<std::pair<int, double>>& data, const std::vector<int>& indices) {
size_t j = 0;
std::vector<double> ret;
for (auto row_idx : indices) {
......
......@@ -512,11 +512,11 @@ std::vector<std::string> DatasetLoader::LoadTextDataToMemory(const char* filenam
}
std::vector<std::string> DatasetLoader::SampleTextDataFromMemory(const std::vector<std::string>& data) {
size_t sample_cnt = static_cast<size_t>(io_config_.bin_construct_sample_cnt);
if (sample_cnt > data.size()) {
sample_cnt = data.size();
int sample_cnt = io_config_.bin_construct_sample_cnt;
if (static_cast<size_t>(sample_cnt) > data.size()) {
sample_cnt = static_cast<int>(data.size());
}
std::vector<size_t> sample_indices = random_.Sample(data.size(), sample_cnt);
auto sample_indices = random_.Sample(static_cast<int>(data.size()), sample_cnt);
std::vector<std::string> out(sample_indices.size());
for (size_t i = 0; i < sample_indices.size(); ++i) {
const size_t idx = sample_indices[i];
......
......@@ -38,7 +38,7 @@ score_t DCGCalculator::CalMaxDCGAtK(data_size_t k, const float* label, data_size
for (data_size_t i = 0; i < num_data; ++i) {
++label_cnt[static_cast<int>(label[i])];
}
size_t top_label = label_gain_.size() - 1;
int top_label = static_cast<int>(label_gain_.size()) - 1;
if (k > num_data) { k = num_data; }
// start from top label, and accumulate DCG
......@@ -67,7 +67,7 @@ void DCGCalculator::CalMaxDCG(const std::vector<data_size_t>& ks,
}
score_t cur_result = 0.0f;
data_size_t cur_left = 0;
size_t top_label = label_gain_.size() - 1;
int top_label = static_cast<int>(label_gain_.size()) - 1;
// calculate k Max DCG by one pass
for (size_t i = 0; i < ks.size(); ++i) {
data_size_t cur_k = ks[i];
......
......@@ -141,8 +141,8 @@ void SerialTreeLearner::BeforeTrain() {
is_feature_used_[i] = false;
}
// Get used feature at current tree
size_t used_feature_cnt = static_cast<size_t>(num_features_*feature_fraction_);
std::vector<size_t> used_feature_indices = random_.Sample(num_features_, used_feature_cnt);
int used_feature_cnt = static_cast<int>(num_features_*feature_fraction_);
auto used_feature_indices = random_.Sample(num_features_, used_feature_cnt);
for (auto idx : used_feature_indices) {
is_feature_used_[idx] = true;
}
......
Markdown is supported
0% uploaded — or attach files by dragging and dropping.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.