Unverified Commit f1a14869 authored by Guolin Ke's avatar Guolin Ke Committed by GitHub
Browse files

fix many cpp lint errors (#2426)

* fix many cpp lint errors

* indent

* fix bug

* fix more

* fix gpu

* more fixes
parent 4f89cc10
......@@ -123,26 +123,26 @@ typedef struct VECTOR_SER {
typedef union { VECTOR_SER s; double align; } SEXPREC_ALIGN;
#define DATAPTR(x) (((SEXPREC_ALIGN *) (x)) + 1)
#define DATAPTR(x) ((reinterpret_cast<SEXPREC_ALIGN*>(x)) + 1)
#define R_CHAR_PTR(x) ((char *) DATAPTR(x))
#define R_CHAR_PTR(x) (reinterpret_cast<char*>DATAPTR(x))
#define R_INT_PTR(x) ((int *) DATAPTR(x))
#define R_INT_PTR(x) (reinterpret_cast<int*> DATAPTR(x))
#define R_INT64_PTR(x) ((int64_t *) DATAPTR(x))
#define R_INT64_PTR(x) (reinterpret_cast<int64_t*> DATAPTR(x))
#define R_REAL_PTR(x) ((double *) DATAPTR(x))
#define R_REAL_PTR(x) (reinterpret_cast<double*> DATAPTR(x))
#define R_AS_INT(x) (*((int *) DATAPTR(x)))
#define R_AS_INT(x) (*(reinterpret_cast<int*> DATAPTR(x)))
#define R_AS_INT64(x) (*((int64_t *) DATAPTR(x)))
#define R_AS_INT64(x) (*(reinterpret_cast<int64_t*> DATAPTR(x)))
#define R_IS_NULL(x) ((*(LGBM_SE)(x)).sxpinfo.type == 0)
#define R_IS_NULL(x) ((*reinterpret_cast<LGBM_SE>(x)).sxpinfo.type == 0)
// 64bit pointer
#if INTPTR_MAX == INT64_MAX
#define R_ADDR(x) ((int64_t *) DATAPTR(x))
#define R_ADDR(x) (reinterpret_cast<int64_t*> DATAPTR(x))
inline void R_SET_PTR(LGBM_SE x, void* ptr) {
if (ptr == nullptr) {
......@@ -156,7 +156,7 @@ inline void* R_GET_PTR(LGBM_SE x) {
if (R_IS_NULL(x)) {
return nullptr;
} else {
auto ret = (void *)(R_ADDR(x)[0]);
auto ret = reinterpret_cast<void*>(R_ADDR(x)[0]);
if (ret == NULL) {
ret = nullptr;
}
......@@ -166,7 +166,7 @@ inline void* R_GET_PTR(LGBM_SE x) {
#else
#define R_ADDR(x) ((int32_t *) DATAPTR(x))
#define R_ADDR(x) (reinterpret_cast<int32_t*> DATAPTR(x))
inline void R_SET_PTR(LGBM_SE x, void* ptr) {
if (ptr == nullptr) {
......@@ -180,7 +180,7 @@ inline void* R_GET_PTR(LGBM_SE x) {
if (R_IS_NULL(x)) {
return nullptr;
} else {
auto ret = (void *)(R_ADDR(x)[0]);
auto ret = reinterpret_cast<void*>(R_ADDR(x)[0]);
if (ret == NULL) {
ret = nullptr;
}
......
......@@ -75,7 +75,7 @@ struct Config {
const std::unordered_map<std::string, std::string>& params,
const std::string& name, bool* out);
static void KV2Map(std::unordered_map<std::string, std::string>& params, const char* kv);
static void KV2Map(std::unordered_map<std::string, std::string>* params, const char* kv);
static std::unordered_map<std::string, std::string> Str2Map(const char* parameters);
#pragma region Parameters
......
......@@ -289,7 +289,7 @@ class Dataset {
LIGHTGBM_EXPORT Dataset(data_size_t num_data);
void Construct(
std::vector<std::unique_ptr<BinMapper>>& bin_mappers,
std::vector<std::unique_ptr<BinMapper>>* bin_mappers,
int** sample_non_zero_indices,
const int* num_per_col,
size_t total_sample_cnt,
......@@ -407,7 +407,7 @@ class Dataset {
void ConstructHistograms(const std::vector<int8_t>& is_feature_used,
const data_size_t* data_indices, data_size_t num_data,
int leaf_idx,
std::vector<std::unique_ptr<OrderedBin>>& ordered_bins,
std::vector<std::unique_ptr<OrderedBin>>* ordered_bins,
const score_t* gradients, const score_t* hessians,
score_t* ordered_gradients, score_t* ordered_hessians,
bool is_constant_hessian,
......
......@@ -52,7 +52,7 @@ class DatasetLoader {
void ConstructBinMappersFromTextData(int rank, int num_machines, const std::vector<std::string>& sample_data, const Parser* parser, Dataset* dataset);
/*! \brief Extract local features from memory */
void ExtractFeaturesFromMemory(std::vector<std::string>& text_data, const Parser* parser, Dataset* dataset);
void ExtractFeaturesFromMemory(std::vector<std::string>* text_data, const Parser* parser, Dataset* dataset);
/*! \brief Extract local features from file */
void ExtractFeaturesFromFile(const char* filename, const Parser* parser, const std::vector<data_size_t>& used_data_indices, Dataset* dataset);
......
......@@ -31,15 +31,15 @@ class FeatureGroup {
* \param sparse_threshold Threshold for treating a feature as a sparse feature
*/
FeatureGroup(int num_feature,
std::vector<std::unique_ptr<BinMapper>>& bin_mappers,
std::vector<std::unique_ptr<BinMapper>>* bin_mappers,
data_size_t num_data, double sparse_threshold, bool is_enable_sparse) : num_feature_(num_feature) {
CHECK(static_cast<int>(bin_mappers.size()) == num_feature);
CHECK(static_cast<int>(bin_mappers->size()) == num_feature);
// use bin at zero to store default_bin
num_total_bin_ = 1;
bin_offsets_.emplace_back(num_total_bin_);
int cnt_non_zero = 0;
for (int i = 0; i < num_feature_; ++i) {
bin_mappers_.emplace_back(bin_mappers[i].release());
bin_mappers_.emplace_back(bin_mappers->at(i).release());
auto num_bin = bin_mappers_[i]->num_bin();
if (bin_mappers_[i]->GetDefaultBin() == 0) {
num_bin -= 1;
......@@ -54,14 +54,14 @@ class FeatureGroup {
}
FeatureGroup(int num_feature,
std::vector<std::unique_ptr<BinMapper>>& bin_mappers,
std::vector<std::unique_ptr<BinMapper>>* bin_mappers,
data_size_t num_data, bool is_sparse) : num_feature_(num_feature) {
CHECK(static_cast<int>(bin_mappers.size()) == num_feature);
CHECK(static_cast<int>(bin_mappers->size()) == num_feature);
// use bin at zero to store default_bin
num_total_bin_ = 1;
bin_offsets_.emplace_back(num_total_bin_);
for (int i = 0; i < num_feature_; ++i) {
bin_mappers_.emplace_back(bin_mappers[i].release());
bin_mappers_.emplace_back(bin_mappers->at(i).release());
auto num_bin = bin_mappers_[i]->num_bin();
if (bin_mappers_[i]->GetDefaultBin() == 0) {
num_bin -= 1;
......
......@@ -166,7 +166,7 @@ class Network {
const ReduceFunction& reducer);
template<class T>
static T GlobalSyncUpByMin(T& local) {
static T GlobalSyncUpByMin(T local) {
T global = local;
Allreduce(reinterpret_cast<char*>(&local),
sizeof(local), sizeof(local),
......@@ -189,7 +189,7 @@ class Network {
return global;
}
template<class T>
static T GlobalSyncUpByMax(T& local) {
static T GlobalSyncUpByMax(T local) {
T global = local;
Allreduce(reinterpret_cast<char*>(&local),
sizeof(local), sizeof(local),
......@@ -213,7 +213,7 @@ class Network {
}
template<class T>
static T GlobalSyncUpBySum(T& local) {
static T GlobalSyncUpBySum(T local) {
T global = (T)0;
Allreduce(reinterpret_cast<char*>(&local),
sizeof(local), sizeof(local),
......@@ -235,15 +235,15 @@ class Network {
}
template<class T>
static T GlobalSyncUpByMean(T& local) {
static T GlobalSyncUpByMean(T local) {
return static_cast<T>(GlobalSyncUpBySum(local) / num_machines_);
}
template<class T>
static void GlobalSum(std::vector<T>& local) {
std::vector<T> global(local.size(), 0);
Allreduce(reinterpret_cast<char*>(local.data()),
static_cast<comm_size_t>(sizeof(T) * local.size()), sizeof(T),
static std::vector<T> GlobalSum(std::vector<T>* local) {
std::vector<T> global(local->size(), 0);
Allreduce(reinterpret_cast<char*>(local->data()),
static_cast<comm_size_t>(sizeof(T) * local->size()), sizeof(T),
reinterpret_cast<char*>(global.data()),
[](const char* src, char* dst, int type_size, comm_size_t len) {
comm_size_t used_size = 0;
......@@ -258,9 +258,7 @@ class Network {
used_size += type_size;
}
});
for (size_t i = 0; i < local.size(); ++i) {
local[i] = global[i];
}
return global;
}
private:
......
......@@ -53,7 +53,7 @@ class TreeLearner {
* \return A trained tree
*/
virtual Tree* Train(const score_t* gradients, const score_t* hessians, bool is_constant_hessian,
Json& forced_split_json) = 0;
const Json& forced_split_json) = 0;
/*!
* \brief use a existing tree to fit the new gradients and hessians.
......
......@@ -116,7 +116,7 @@ class ArrayArgs {
std::vector<VAL_T>& ref = *arr;
VAL_T v = ref[end - 1];
for (;;) {
while (ref[++i] > v);
while (ref[++i] > v) {}
while (v > ref[--j]) { if (j == start) { break; } }
if (i >= j) { break; }
std::swap(ref[i], ref[j]);
......
......@@ -162,31 +162,6 @@ inline static const char* Atoi(const char* p, T* out) {
return p;
}
template <typename T>
inline void SplitToIntLike(const char *c_str, char delimiter,
std::vector<T> &ret) {
CHECK(ret.empty());
std::string str(c_str);
size_t i = 0;
size_t pos = 0;
while (pos < str.length()) {
if (str[pos] == delimiter) {
if (i < pos) {
ret.push_back({});
Atoi(str.substr(i, pos - i).c_str(), &ret.back());
}
++pos;
i = pos;
} else {
++pos;
}
}
if (i < pos) {
ret.push_back({});
Atoi(str.substr(i).c_str(), &ret.back());
}
}
template<typename T>
inline static double Pow(T base, int power) {
if (power < 0) {
......@@ -664,10 +639,10 @@ std::vector<const T*> ConstPtrInVectorWrapper(const std::vector<std::unique_ptr<
}
template<typename T1, typename T2>
inline static void SortForPair(std::vector<T1>& keys, std::vector<T2>& values, size_t start, bool is_reverse = false) {
inline static void SortForPair(std::vector<T1>* keys, std::vector<T2>* values, size_t start, bool is_reverse = false) {
std::vector<std::pair<T1, T2>> arr;
for (size_t i = start; i < keys.size(); ++i) {
arr.emplace_back(keys[i], values[i]);
for (size_t i = start; i < keys->size(); ++i) {
arr.emplace_back(keys->at(i), values->at(i));
}
if (!is_reverse) {
std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) {
......@@ -679,16 +654,16 @@ inline static void SortForPair(std::vector<T1>& keys, std::vector<T2>& values, s
});
}
for (size_t i = start; i < arr.size(); ++i) {
keys[i] = arr[i].first;
values[i] = arr[i].second;
keys->at(i) = arr[i].first;
values->at(i) = arr[i].second;
}
}
template <typename T>
inline static std::vector<T*> Vector2Ptr(std::vector<std::vector<T>>& data) {
std::vector<T*> ptr(data.size());
for (size_t i = 0; i < data.size(); ++i) {
ptr[i] = data[i].data();
inline static std::vector<T*> Vector2Ptr(std::vector<std::vector<T>>* data) {
std::vector<T*> ptr(data->size());
for (size_t i = 0; i < data->size(); ++i) {
ptr[i] = data->at(i).data();
}
return ptr;
}
......@@ -715,7 +690,7 @@ inline static double AvoidInf(double x) {
}
inline static float AvoidInf(float x) {
if (std::isnan(x)){
if (std::isnan(x)) {
return 0.0f;
} else if (x >= 1e38) {
return 1e38f;
......@@ -865,13 +840,13 @@ inline static std::vector<uint32_t> EmptyBitset(int n) {
}
template<typename T>
inline static void InsertBitset(std::vector<uint32_t>& vec, const T val) {
inline static void InsertBitset(std::vector<uint32_t>* vec, const T val) {
int i1 = val / 32;
int i2 = val % 32;
if (static_cast<int>(vec.size()) < i1 + 1) {
vec.resize(i1 + 1, 0);
if (static_cast<int>(vec->size()) < i1 + 1) {
vec->resize(i1 + 1, 0);
}
vec[i1] |= (1 << i2);
vec->at(i1) |= (1 << i2);
}
template<typename T>
......
......@@ -29,7 +29,7 @@ class Random {
/*!
* \brief Constructor, with specific seed
*/
Random(int seed) {
explicit Random(int seed) {
x = seed;
}
/*!
......
......@@ -160,7 +160,7 @@ class TextReader {
return ret;
}
INDEX_T SampleFromFile(Random& random, INDEX_T sample_cnt, std::vector<std::string>* out_sampled_data) {
INDEX_T SampleFromFile(Random* random, INDEX_T sample_cnt, std::vector<std::string>* out_sampled_data) {
INDEX_T cur_sample_cnt = 0;
return ReadAllAndProcess(
[&]
......@@ -169,7 +169,7 @@ class TextReader {
out_sampled_data->emplace_back(buffer, size);
++cur_sample_cnt;
} else {
const size_t idx = static_cast<size_t>(random.NextInt(0, static_cast<int>(line_idx + 1)));
const size_t idx = static_cast<size_t>(random->NextInt(0, static_cast<int>(line_idx + 1)));
if (idx < static_cast<size_t>(sample_cnt)) {
out_sampled_data->operator[](idx) = std::string(buffer, size);
}
......@@ -195,7 +195,7 @@ class TextReader {
}
INDEX_T SampleAndFilterFromFile(const std::function<bool(INDEX_T)>& filter_fun, std::vector<INDEX_T>* out_used_data_indices,
Random& random, INDEX_T sample_cnt, std::vector<std::string>* out_sampled_data) {
Random* random, INDEX_T sample_cnt, std::vector<std::string>* out_sampled_data) {
INDEX_T cur_sample_cnt = 0;
out_used_data_indices->clear();
INDEX_T total_cnt = ReadAllAndProcess(
......@@ -208,7 +208,7 @@ class TextReader {
out_sampled_data->emplace_back(buffer, size);
++cur_sample_cnt;
} else {
const size_t idx = static_cast<size_t>(random.NextInt(0, static_cast<int>(out_used_data_indices->size())));
const size_t idx = static_cast<size_t>(random->NextInt(0, static_cast<int>(out_used_data_indices->size())));
if (idx < static_cast<size_t>(sample_cnt)) {
out_sampled_data->operator[](idx) = std::string(buffer, size);
}
......
......@@ -48,7 +48,7 @@ Application::~Application() {
void Application::LoadParameters(int argc, char** argv) {
std::unordered_map<std::string, std::string> params;
for (int i = 1; i < argc; ++i) {
Config::KV2Map(params, argv[i]);
Config::KV2Map(&params, argv[i]);
}
// check for alias
ParameterAlias::KeyAliasTransform(&params);
......@@ -66,7 +66,7 @@ void Application::LoadParameters(int argc, char** argv) {
if (line.size() == 0) {
continue;
}
Config::KV2Map(params, line.c_str());
Config::KV2Map(&params, line.c_str());
}
} else {
Log::Warning("Config file %s doesn't exist, will ignore",
......@@ -85,7 +85,6 @@ void Application::LoadData() {
std::unique_ptr<Predictor> predictor;
// prediction is needed if using input initial model(continued train)
PredictFunction predict_fun = nullptr;
// need to continue training
if (boosting_->NumberOfTotalModel() > 0 && config_.task != TaskType::KRefitTree) {
predictor.reset(new Predictor(boosting_.get(), -1, true, false, false, false, -1, -1));
......
......@@ -157,7 +157,7 @@ void GBDT::Boosting() {
GetGradients(GetTrainingScore(&num_score), gradients_.data(), hessians_.data());
}
data_size_t GBDT::BaggingHelper(Random& cur_rand, data_size_t start, data_size_t cnt, data_size_t* buffer) {
data_size_t GBDT::BaggingHelper(Random* cur_rand, data_size_t start, data_size_t cnt, data_size_t* buffer) {
if (cnt <= 0) {
return 0;
}
......@@ -168,7 +168,7 @@ data_size_t GBDT::BaggingHelper(Random& cur_rand, data_size_t start, data_size_t
// random bagging, minimal unit is one record
for (data_size_t i = 0; i < cnt; ++i) {
float prob = (bag_data_cnt - cur_left_cnt) / static_cast<float>(cnt - i);
if (cur_rand.NextFloat() < prob) {
if (cur_rand->NextFloat() < prob) {
buffer[cur_left_cnt++] = start + i;
} else {
right_buffer[cur_right_cnt++] = start + i;
......@@ -178,7 +178,7 @@ data_size_t GBDT::BaggingHelper(Random& cur_rand, data_size_t start, data_size_t
return cur_left_cnt;
}
data_size_t GBDT::BalancedBaggingHelper(Random& cur_rand, data_size_t start, data_size_t cnt, data_size_t* buffer) {
data_size_t GBDT::BalancedBaggingHelper(Random* cur_rand, data_size_t start, data_size_t cnt, data_size_t* buffer) {
if (cnt <= 0) {
return 0;
}
......@@ -192,9 +192,9 @@ data_size_t GBDT::BalancedBaggingHelper(Random& cur_rand, data_size_t start, dat
bool is_pos = label_ptr[start + i] > 0;
bool is_in_bag = false;
if (is_pos) {
is_in_bag = cur_rand.NextFloat() < config_->pos_bagging_fraction;
is_in_bag = cur_rand->NextFloat() < config_->pos_bagging_fraction;
} else {
is_in_bag = cur_rand.NextFloat() < config_->neg_bagging_fraction;
is_in_bag = cur_rand->NextFloat() < config_->neg_bagging_fraction;
}
if (is_in_bag) {
buffer[cur_left_cnt++] = start + i;
......@@ -228,9 +228,9 @@ void GBDT::Bagging(int iter) {
Random cur_rand(config_->bagging_seed + iter * num_threads_ + i);
data_size_t cur_left_count = 0;
if (balanced_bagging_) {
cur_left_count = BalancedBaggingHelper(cur_rand, cur_start, cur_cnt, tmp_indices_.data() + cur_start);
cur_left_count = BalancedBaggingHelper(&cur_rand, cur_start, cur_cnt, tmp_indices_.data() + cur_start);
} else {
cur_left_count = BaggingHelper(cur_rand, cur_start, cur_cnt, tmp_indices_.data() + cur_start);
cur_left_count = BaggingHelper(&cur_rand, cur_start, cur_cnt, tmp_indices_.data() + cur_start);
}
offsets_buf_[i] = cur_start;
left_cnts_buf_[i] = cur_left_count;
......
......@@ -141,7 +141,7 @@ class GBDT : public GBDTBase {
* \param hessians nullptr for using default objective, otherwise use self-defined boosting
* \return True if cannot train any more
*/
virtual bool TrainOneIter(const score_t* gradients, const score_t* hessians) override;
bool TrainOneIter(const score_t* gradients, const score_t* hessians) override;
/*!
* \brief Rollback one iteration
......@@ -177,14 +177,14 @@ class GBDT : public GBDTBase {
* \param out_len length of returned score
* \return training score
*/
virtual const double* GetTrainingScore(int64_t* out_len) override;
const double* GetTrainingScore(int64_t* out_len) override;
/*!
* \brief Get size of prediction at data_idx data
* \param data_idx 0: training data, 1: 1st validation data
* \return The size of prediction
*/
virtual int64_t GetNumPredictAt(int data_idx) const override {
int64_t GetNumPredictAt(int data_idx) const override {
CHECK(data_idx >= 0 && data_idx <= static_cast<int>(valid_score_updater_.size()));
data_size_t num_data = train_data_->num_data();
if (data_idx > 0) {
......@@ -272,7 +272,7 @@ class GBDT : public GBDTBase {
* \param filename Filename that want to save to
* \return is_finish Is training finished or not
*/
virtual bool SaveModelToFile(int start_iteration, int num_iterations, const char* filename) const override;
bool SaveModelToFile(int start_iteration, int num_iterations, const char* filename) const override;
/*!
* \brief Save model to string
......@@ -280,7 +280,7 @@ class GBDT : public GBDTBase {
* \param num_iterations Number of model that want to save, -1 means save all
* \return Non-empty string if succeeded
*/
virtual std::string SaveModelToString(int start_iteration, int num_iterations) const override;
std::string SaveModelToString(int start_iteration, int num_iterations) const override;
/*!
* \brief Restore from a serialized buffer
......@@ -359,7 +359,7 @@ class GBDT : public GBDTBase {
/*!
* \brief Get Type name of this boosting object
*/
virtual const char* SubModelName() const override { return "tree"; }
const char* SubModelName() const override { return "tree"; }
protected:
/*!
......@@ -385,7 +385,7 @@ class GBDT : public GBDTBase {
* \param buffer output buffer
* \return count of left size
*/
data_size_t BaggingHelper(Random& cur_rand, data_size_t start, data_size_t cnt, data_size_t* buffer);
data_size_t BaggingHelper(Random* cur_rand, data_size_t start, data_size_t cnt, data_size_t* buffer);
/*!
......@@ -395,7 +395,7 @@ class GBDT : public GBDTBase {
* \param buffer output buffer
* \return count of left size
*/
data_size_t BalancedBaggingHelper(Random& cur_rand, data_size_t start, data_size_t cnt, data_size_t* buffer);
data_size_t BalancedBaggingHelper(Random* cur_rand, data_size_t start, data_size_t cnt, data_size_t* buffer);
/*!
* \brief calculate the object function
......
......@@ -14,7 +14,7 @@
namespace LightGBM {
const std::string kModelVersion = "v3";
const char* kModelVersion = "v3";
std::string GBDT::DumpModel(int start_iteration, int num_iteration) const {
std::stringstream str_buf;
......@@ -435,7 +435,7 @@ bool GBDT::LoadModelFromString(const char* buffer, size_t len) {
// get monotone_constraints
if (key_vals.count("monotone_constraints")) {
Common::SplitToIntLike(key_vals["monotone_constraints"].c_str(), ' ', monotone_constraints_);
monotone_constraints_ = Common::StringToArray<int8_t>(key_vals["monotone_constraints"].c_str(), ' ');
if (monotone_constraints_.size() != static_cast<size_t>(max_feature_idx_ + 1)) {
Log::Fatal("Wrong size of monotone_constraints");
return false;
......
......@@ -88,7 +88,7 @@ class GOSS: public GBDT {
bag_data_cnt_ = num_data_;
}
data_size_t BaggingHelper(Random& cur_rand, data_size_t start, data_size_t cnt, data_size_t* buffer, data_size_t* buffer_right) {
data_size_t BaggingHelper(Random* cur_rand, data_size_t start, data_size_t cnt, data_size_t* buffer, data_size_t* buffer_right) {
if (cnt <= 0) {
return 0;
}
......@@ -123,7 +123,7 @@ class GOSS: public GBDT {
data_size_t rest_need = other_k - sampled;
data_size_t rest_all = (cnt - i) - (top_k - big_weight_cnt);
double prob = (rest_need) / static_cast<double>(rest_all);
if (cur_rand.NextFloat() < prob) {
if (cur_rand->NextFloat() < prob) {
buffer[cur_left_cnt++] = start + i;
for (int cur_tree_id = 0; cur_tree_id < num_tree_per_iteration_; ++cur_tree_id) {
size_t idx = static_cast<size_t>(cur_tree_id) * num_data_ + start + i;
......@@ -157,7 +157,7 @@ class GOSS: public GBDT {
data_size_t cur_cnt = inner_size;
if (cur_start + cur_cnt > num_data_) { cur_cnt = num_data_ - cur_start; }
Random cur_rand(config_->bagging_seed + iter * num_threads_ + i);
data_size_t cur_left_count = BaggingHelper(cur_rand, cur_start, cur_cnt,
data_size_t cur_left_count = BaggingHelper(&cur_rand, cur_start, cur_cnt,
tmp_indices_.data() + cur_start, tmp_indice_right_.data() + cur_start);
offsets_buf_[i] = cur_start;
left_cnts_buf_[i] = cur_left_count;
......
......@@ -11,9 +11,7 @@
#include <cmath>
#include <vector>
namespace {
using namespace LightGBM;
namespace LightGBM {
PredictionEarlyStopInstance CreateNone(const PredictionEarlyStopConfig&) {
return PredictionEarlyStopInstance{
......@@ -74,10 +72,6 @@ PredictionEarlyStopInstance CreateBinary(const PredictionEarlyStopConfig& config
};
}
} // namespace
namespace LightGBM {
PredictionEarlyStopInstance CreatePredictionEarlyStopInstance(const std::string& type,
const PredictionEarlyStopConfig& config) {
if (type == "none") {
......
......@@ -54,7 +54,7 @@ class SingleRowPredictor {
PredictFunction predict_function;
int64_t num_pred_in_one_row;
SingleRowPredictor(int predict_type, Boosting& boosting, const Config& config, int iter) {
SingleRowPredictor(int predict_type, Boosting* boosting, const Config& config, int iter) {
bool is_predict_leaf = false;
bool is_raw_score = false;
bool predict_contrib = false;
......@@ -71,21 +71,21 @@ class SingleRowPredictor {
early_stop_freq_ = config.pred_early_stop_freq;
early_stop_margin_ = config.pred_early_stop_margin;
iter_ = iter;
predictor_.reset(new Predictor(&boosting, iter_, is_raw_score, is_predict_leaf, predict_contrib,
predictor_.reset(new Predictor(boosting, iter_, is_raw_score, is_predict_leaf, predict_contrib,
early_stop_, early_stop_freq_, early_stop_margin_));
num_pred_in_one_row = boosting.NumPredictOneRow(iter_, is_predict_leaf, predict_contrib);
num_pred_in_one_row = boosting->NumPredictOneRow(iter_, is_predict_leaf, predict_contrib);
predict_function = predictor_->GetPredictFunction();
num_total_model_ = boosting.NumberOfTotalModel();
num_total_model_ = boosting->NumberOfTotalModel();
}
~SingleRowPredictor() {}
bool IsPredictorEqual(const Config& config, int iter, Boosting& boosting) {
bool IsPredictorEqual(const Config& config, int iter, Boosting* boosting) {
return early_stop_ != config.pred_early_stop ||
early_stop_freq_ != config.pred_early_stop_freq ||
early_stop_margin_ != config.pred_early_stop_margin ||
iter_ != iter ||
num_total_model_ != boosting.NumberOfTotalModel();
num_total_model_ != boosting->NumberOfTotalModel();
}
private:
std::unique_ptr<Predictor> predictor_;
bool early_stop_;
......@@ -255,8 +255,8 @@ class Booster {
double* out_result, int64_t* out_len) {
std::lock_guard<std::mutex> lock(mutex_);
if (single_row_predictor_[predict_type].get() == nullptr ||
!single_row_predictor_[predict_type]->IsPredictorEqual(config, num_iteration, *boosting_.get())) {
single_row_predictor_[predict_type].reset(new SingleRowPredictor(predict_type, *boosting_.get(),
!single_row_predictor_[predict_type]->IsPredictorEqual(config, num_iteration, boosting_.get())) {
single_row_predictor_[predict_type].reset(new SingleRowPredictor(predict_type, boosting_.get(),
config, num_iteration));
}
......@@ -645,8 +645,8 @@ int LGBM_DatasetCreateFromMats(int32_t nmat,
}
}
DatasetLoader loader(config, nullptr, 1, nullptr);
ret.reset(loader.CostructFromSampleData(Common::Vector2Ptr<double>(sample_values).data(),
Common::Vector2Ptr<int>(sample_idx).data(),
ret.reset(loader.CostructFromSampleData(Common::Vector2Ptr<double>(&sample_values).data(),
Common::Vector2Ptr<int>(&sample_idx).data(),
static_cast<int>(sample_values.size()),
Common::VectorSize<double>(sample_values).data(),
sample_cnt, total_nrow));
......@@ -716,8 +716,8 @@ int LGBM_DatasetCreateFromCSR(const void* indptr,
}
}
DatasetLoader loader(config, nullptr, 1, nullptr);
ret.reset(loader.CostructFromSampleData(Common::Vector2Ptr<double>(sample_values).data(),
Common::Vector2Ptr<int>(sample_idx).data(),
ret.reset(loader.CostructFromSampleData(Common::Vector2Ptr<double>(&sample_values).data(),
Common::Vector2Ptr<int>(&sample_idx).data(),
static_cast<int>(sample_values.size()),
Common::VectorSize<double>(sample_values).data(),
sample_cnt, nrow));
......@@ -781,8 +781,8 @@ int LGBM_DatasetCreateFromCSRFunc(void* get_row_funptr,
}
}
DatasetLoader loader(config, nullptr, 1, nullptr);
ret.reset(loader.CostructFromSampleData(Common::Vector2Ptr<double>(sample_values).data(),
Common::Vector2Ptr<int>(sample_idx).data(),
ret.reset(loader.CostructFromSampleData(Common::Vector2Ptr<double>(&sample_values).data(),
Common::Vector2Ptr<int>(&sample_idx).data(),
static_cast<int>(sample_values.size()),
Common::VectorSize<double>(sample_values).data(),
sample_cnt, nrow));
......@@ -854,8 +854,8 @@ int LGBM_DatasetCreateFromCSC(const void* col_ptr,
}
OMP_THROW_EX();
DatasetLoader loader(config, nullptr, 1, nullptr);
ret.reset(loader.CostructFromSampleData(Common::Vector2Ptr<double>(sample_values).data(),
Common::Vector2Ptr<int>(sample_idx).data(),
ret.reset(loader.CostructFromSampleData(Common::Vector2Ptr<double>(&sample_values).data(),
Common::Vector2Ptr<int>(&sample_idx).data(),
static_cast<int>(sample_values.size()),
Common::VectorSize<double>(sample_values).data(),
sample_cnt, nrow));
......
......@@ -328,7 +328,7 @@ namespace LightGBM {
"Consider renumbering to consecutive integers started from zero");
}
// sort by counts
Common::SortForPair<int, int>(counts_int, distinct_values_int, 0, true);
Common::SortForPair<int, int>(&counts_int, &distinct_values_int, 0, true);
// avoid first bin is zero
if (distinct_values_int[0] == 0) {
if (counts_int.size() == 1) {
......
......@@ -12,7 +12,7 @@
namespace LightGBM {
void Config::KV2Map(std::unordered_map<std::string, std::string>& params, const char* kv) {
void Config::KV2Map(std::unordered_map<std::string, std::string>* params, const char* kv) {
std::vector<std::string> tmp_strs = Common::Split(kv, '=');
if (tmp_strs.size() == 2 || tmp_strs.size() == 1) {
std::string key = Common::RemoveQuotationSymbol(Common::Trim(tmp_strs[0]));
......@@ -24,9 +24,9 @@ void Config::KV2Map(std::unordered_map<std::string, std::string>& params, const
Log::Fatal("Do not support non-ascii characters in config.");
}
if (key.size() > 0) {
auto value_search = params.find(key);
if (value_search == params.end()) { // not set
params.emplace(key, value);
auto value_search = params->find(key);
if (value_search == params->end()) { // not set
params->emplace(key, value);
} else {
Log::Warning("%s is set=%s, %s=%s will be ignored. Current value: %s=%s",
key.c_str(), value_search->second.c_str(), key.c_str(), value.c_str(),
......@@ -42,7 +42,7 @@ std::unordered_map<std::string, std::string> Config::Str2Map(const char* paramet
std::unordered_map<std::string, std::string> params;
auto args = Common::Split(parameters, " \t\n\r");
for (auto arg : args) {
KV2Map(params, Common::Trim(arg).c_str());
KV2Map(&params, Common::Trim(arg).c_str());
}
ParameterAlias::KeyAliasTransform(&params);
return params;
......@@ -210,7 +210,7 @@ void Config::Set(const std::unordered_map<std::string, std::string>& params) {
// generate seeds by seed.
if (GetInt(params, "seed", &seed)) {
Random rand(seed);
int int_max = std::numeric_limits<short>::max();
int int_max = std::numeric_limits<int16_t>::max();
data_random_seed = static_cast<int>(rand.NextShort(0, int_max));
bagging_seed = static_cast<int>(rand.NextShort(0, int_max));
drop_seed = static_cast<int>(rand.NextShort(0, int_max));
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment