Commit 462612b4 authored by Nikita Titov's avatar Nikita Titov Committed by Guolin Ke
Browse files

fixed modifiers indent (#1997)

parent 8e286b38
......@@ -23,7 +23,7 @@ namespace LightGBM {
* \brief Used to predict data with input model
*/
class Predictor {
public:
public:
/*!
* \brief Constructor
* \param boosting Input boosting model
......@@ -207,7 +207,7 @@ public:
predict_data_reader.ReadAllAndProcessParallel(process_fun);
}
private:
private:
void CopyToPredictBuffer(double* pred_buf, const std::vector<std::pair<int, double>>& features) {
int loop_size = static_cast<int>(features.size());
for (int i = 0; i < loop_size; ++i) {
......
......@@ -15,7 +15,7 @@ namespace LightGBM {
* \brief DART algorithm implementation. including Training, prediction, bagging.
*/
class DART: public GBDT {
public:
public:
/*!
* \brief Constructor
*/
......@@ -84,7 +84,7 @@ public:
return false;
}
private:
private:
/*!
* \brief drop trees based on drop_rate
*/
......
......@@ -24,7 +24,7 @@ namespace LightGBM {
* \brief GBDT algorithm implementation. including Training, prediction, bagging.
*/
class GBDT : public GBDTBase {
public:
public:
/*!
* \brief Constructor
*/
......@@ -354,7 +354,7 @@ public:
*/
virtual const char* SubModelName() const override { return "tree"; }
protected:
protected:
/*!
* \brief Print eval result and check early stopping
*/
......
......@@ -24,7 +24,7 @@ std::chrono::duration<double, std::milli> re_init_tree_time;
#endif
class GOSS: public GBDT {
public:
public:
/*!
* \brief Constructor
*/
......@@ -208,7 +208,7 @@ public:
}
}
private:
private:
std::vector<data_size_t> tmp_indice_right_;
};
......
......@@ -16,7 +16,7 @@ namespace LightGBM {
* \brief Random Forest implementation
*/
class RF : public GBDT {
public:
public:
RF() : GBDT() {
average_output_ = true;
}
......@@ -199,7 +199,7 @@ public:
return true;
};
private:
private:
std::vector<score_t> tmp_grad_;
std::vector<score_t> tmp_hess_;
std::vector<double> init_scores_;
......
......@@ -15,7 +15,7 @@ namespace LightGBM {
* \brief Used to store and update score for data
*/
class ScoreUpdater {
public:
public:
/*!
* \brief Constructor, will pass a const pointer of dataset
* \param data This class will bind with this data set
......@@ -109,7 +109,7 @@ public:
/*! \brief Disable copy */
ScoreUpdater(const ScoreUpdater&) = delete;
private:
private:
/*! \brief Number of total data */
data_size_t num_data_;
/*! \brief Pointer of data set */
......
......@@ -44,7 +44,7 @@ catch(...) { return LGBM_APIHandleException("unknown exception"); } \
return 0;
class Booster {
public:
public:
explicit Booster(const char* filename) {
boosting_.reset(Boosting::CreateBoosting("gbdt", filename));
}
......@@ -323,7 +323,7 @@ public:
const Boosting* GetBoosting() const { return boosting_.get(); }
private:
private:
const Dataset* train_data_;
std::unique_ptr<Boosting> boosting_;
/*! \brief All configs */
......@@ -356,7 +356,7 @@ RowFunctionFromCSR(const void* indptr, int indptr_type, const int32_t* indices,
// Row iterator of one column for CSC matrix
class CSC_RowIterator {
public:
public:
CSC_RowIterator(const void* col_ptr, int col_ptr_type, const int32_t* indices,
const void* data, int data_type, int64_t ncol_ptr, int64_t nelem, int col_idx);
~CSC_RowIterator() {}
......@@ -364,7 +364,8 @@ public:
double Get(int idx);
// return next non-zero pair, if index < 0, means no more data
std::pair<int, double> NextNonZero();
private:
private:
int nonzero_idx_ = 0;
int cur_idx_ = -1;
double cur_val_ = 0.0f;
......
......@@ -14,7 +14,7 @@ class DenseBin;
template <typename VAL_T>
class DenseBinIterator: public BinIterator {
public:
public:
explicit DenseBinIterator(const DenseBin<VAL_T>* bin_data, uint32_t min_bin, uint32_t max_bin, uint32_t default_bin)
: bin_data_(bin_data), min_bin_(static_cast<VAL_T>(min_bin)),
max_bin_(static_cast<VAL_T>(max_bin)),
......@@ -28,7 +28,8 @@ public:
inline uint32_t RawGet(data_size_t idx) override;
inline uint32_t Get(data_size_t idx) override;
inline void Reset(data_size_t) override { }
private:
private:
const DenseBin<VAL_T>* bin_data_;
VAL_T min_bin_;
VAL_T max_bin_;
......@@ -41,7 +42,7 @@ private:
*/
template <typename VAL_T>
class DenseBin: public Bin {
public:
public:
friend DenseBinIterator<VAL_T>;
DenseBin(data_size_t num_data)
: num_data_(num_data), data_(num_data_, static_cast<VAL_T>(0)) {
......@@ -310,7 +311,7 @@ public:
return sizeof(VAL_T) * num_data_;
}
protected:
protected:
data_size_t num_data_;
std::vector<VAL_T> data_;
};
......
......@@ -12,7 +12,7 @@ namespace LightGBM {
class Dense4bitsBin;
class Dense4bitsBinIterator : public BinIterator {
public:
public:
explicit Dense4bitsBinIterator(const Dense4bitsBin* bin_data, uint32_t min_bin, uint32_t max_bin, uint32_t default_bin)
: bin_data_(bin_data), min_bin_(static_cast<uint8_t>(min_bin)),
max_bin_(static_cast<uint8_t>(max_bin)),
......@@ -26,7 +26,8 @@ public:
inline uint32_t RawGet(data_size_t idx) override;
inline uint32_t Get(data_size_t idx) override;
inline void Reset(data_size_t) override {}
private:
private:
const Dense4bitsBin* bin_data_;
uint8_t min_bin_;
uint8_t max_bin_;
......@@ -35,7 +36,7 @@ private:
};
class Dense4bitsBin : public Bin {
public:
public:
friend Dense4bitsBinIterator;
Dense4bitsBin(data_size_t num_data)
: num_data_(num_data) {
......@@ -362,7 +363,7 @@ public:
return sizeof(uint8_t) * data_.size();
}
protected:
protected:
data_size_t num_data_;
std::vector<uint8_t> data_;
std::vector<uint8_t> buf_;
......
......@@ -42,7 +42,7 @@ struct LocalFile : VirtualFileReader, VirtualFileWriter {
return fwrite(buffer, bytes, 1, file_) == 1 ? bytes : 0;
}
private:
private:
FILE* file_ = NULL;
const std::string filename_;
const std::string mode_;
......@@ -86,7 +86,7 @@ struct HDFSFile : VirtualFileReader, VirtualFileWriter {
return FileOperation<const void*>(data, bytes, &hdfsWrite);
}
private:
private:
template <typename BufferType>
using fileOp = tSize(*)(hdfsFS, hdfsFile, BufferType, tSize);
......
......@@ -147,7 +147,7 @@ void Json::dump(string &out) const {
template <Json::Type tag, typename T>
class Value : public JsonValue {
protected:
protected:
// Constructors
explicit Value(const T &value) : m_value(value) {}
explicit Value(T &&value) : m_value(move(value)) {}
......@@ -174,7 +174,7 @@ class JsonDouble final : public Value<Json::NUMBER, double> {
int int_value() const override { return static_cast<int>(m_value); }
bool equals(const JsonValue * other) const override { return m_value == other->number_value(); }
bool less(const JsonValue * other) const override { return m_value < other->number_value(); }
public:
public:
explicit JsonDouble(double value) : Value(value) {}
};
......@@ -183,19 +183,19 @@ class JsonInt final : public Value<Json::NUMBER, int> {
int int_value() const override { return m_value; }
bool equals(const JsonValue * other) const override { return m_value == other->number_value(); }
bool less(const JsonValue * other) const override { return m_value < other->number_value(); }
public:
public:
explicit JsonInt(int value) : Value(value) {}
};
class JsonBoolean final : public Value<Json::BOOL, bool> {
bool bool_value() const override { return m_value; }
public:
public:
explicit JsonBoolean(bool value) : Value(value) {}
};
class JsonString final : public Value<Json::STRING, string> {
const string &string_value() const override { return m_value; }
public:
public:
explicit JsonString(const string &value) : Value(value) {}
explicit JsonString(string &&value) : Value(move(value)) {}
};
......@@ -203,7 +203,7 @@ public:
class JsonArray final : public Value<Json::ARRAY, Json::array> {
const Json::array &array_items() const override { return m_value; }
const Json & operator[](size_t i) const override;
public:
public:
explicit JsonArray(const Json::array &value) : Value(value) {}
explicit JsonArray(Json::array &&value) : Value(move(value)) {}
};
......@@ -211,13 +211,13 @@ public:
class JsonObject final : public Value<Json::OBJECT, Json::object> {
const Json::object &object_items() const override { return m_value; }
const Json & operator[](const string &key) const override;
public:
public:
explicit JsonObject(const Json::object &value) : Value(value) {}
explicit JsonObject(Json::object &&value) : Value(move(value)) {}
};
class JsonNull final : public Value<Json::NUL, NullStruct> {
public:
public:
JsonNull() : Value({}) {}
};
......
......@@ -24,7 +24,7 @@ namespace LightGBM {
*/
template <typename VAL_T>
class OrderedSparseBin: public OrderedBin {
public:
public:
/*! \brief Pair to store one bin entry */
struct SparsePair {
data_size_t ridx; // data(row) index
......@@ -192,7 +192,7 @@ public:
/*! \brief Disable copy */
OrderedSparseBin<VAL_T>(const OrderedSparseBin<VAL_T>&) = delete;
private:
private:
const SparseBin<VAL_T>* bin_data_;
/*! \brief Store non-zero pair , group by leaf */
std::vector<SparsePair> ordered_pair_;
......
......@@ -13,7 +13,7 @@
namespace LightGBM {
class CSVParser: public Parser {
public:
public:
explicit CSVParser(int label_idx, int total_columns)
:label_idx_(label_idx), total_columns_(total_columns) {
}
......@@ -45,13 +45,13 @@ public:
return total_columns_;
}
private:
private:
int label_idx_ = 0;
int total_columns_ = -1;
};
class TSVParser: public Parser {
public:
public:
explicit TSVParser(int label_idx, int total_columns)
:label_idx_(label_idx), total_columns_(total_columns) {
}
......@@ -81,13 +81,13 @@ public:
return total_columns_;
}
private:
private:
int label_idx_ = 0;
int total_columns_ = -1;
};
class LibSVMParser: public Parser {
public:
public:
explicit LibSVMParser(int label_idx)
:label_idx_(label_idx) {
if (label_idx > 0) {
......@@ -121,7 +121,7 @@ public:
return -1;
}
private:
private:
int label_idx_ = 0;
};
......
......@@ -20,7 +20,7 @@ const size_t kNumFastIndex = 64;
template <typename VAL_T>
class SparseBinIterator: public BinIterator {
public:
public:
SparseBinIterator(const SparseBin<VAL_T>* bin_data,
uint32_t min_bin, uint32_t max_bin, uint32_t default_bin)
: bin_data_(bin_data), min_bin_(static_cast<VAL_T>(min_bin)),
......@@ -52,7 +52,7 @@ public:
inline void Reset(data_size_t idx) override;
private:
private:
const SparseBin<VAL_T>* bin_data_;
data_size_t cur_pos_;
data_size_t i_delta_;
......@@ -67,7 +67,7 @@ class OrderedSparseBin;
template <typename VAL_T>
class SparseBin: public Bin {
public:
public:
friend class SparseBinIterator<VAL_T>;
friend class OrderedSparseBin<VAL_T>;
......@@ -407,7 +407,7 @@ public:
GetFastIndex();
}
protected:
protected:
data_size_t num_data_;
std::vector<uint8_t> deltas_;
std::vector<VAL_T> vals_;
......
......@@ -18,7 +18,7 @@ namespace LightGBM {
*/
template<typename PointWiseLossCalculator>
class BinaryMetric: public Metric {
public:
public:
explicit BinaryMetric(const Config&) {
}
......@@ -92,7 +92,7 @@ public:
return std::vector<double>(1, loss);
}
private:
private:
/*! \brief Number of data */
data_size_t num_data_;
/*! \brief Pointer of label */
......@@ -109,7 +109,7 @@ private:
* \brief Log loss metric for binary classification task.
*/
class BinaryLoglossMetric: public BinaryMetric<BinaryLoglossMetric> {
public:
public:
explicit BinaryLoglossMetric(const Config& config) :BinaryMetric<BinaryLoglossMetric>(config) {}
inline static double LossOnPoint(label_t label, double prob) {
......@@ -133,7 +133,7 @@ public:
* \brief Error rate metric for binary classification task.
*/
class BinaryErrorMetric: public BinaryMetric<BinaryErrorMetric> {
public:
public:
explicit BinaryErrorMetric(const Config& config) :BinaryMetric<BinaryErrorMetric>(config) {}
inline static double LossOnPoint(label_t label, double prob) {
......@@ -153,7 +153,7 @@ public:
* \brief Auc Metric for binary classification task.
*/
class AUCMetric: public Metric {
public:
public:
explicit AUCMetric(const Config&) {
}
......@@ -246,7 +246,7 @@ public:
return std::vector<double>(1, auc);
}
private:
private:
/*! \brief Number of data */
data_size_t num_data_;
/*! \brief Pointer of label */
......
......@@ -13,7 +13,7 @@
namespace LightGBM {
class MapMetric:public Metric {
public:
public:
explicit MapMetric(const Config& config) {
// get eval position
eval_at_ = config.eval_at;
......@@ -142,7 +142,7 @@ public:
return result;
}
private:
private:
/*! \brief Number of data */
data_size_t num_data_;
/*! \brief Pointer of label */
......
......@@ -14,7 +14,7 @@ namespace LightGBM {
*/
template<typename PointWiseLossCalculator>
class MulticlassMetric: public Metric {
public:
public:
explicit MulticlassMetric(const Config& config) {
num_class_ = config.num_class;
}
......@@ -112,7 +112,7 @@ public:
return std::vector<double>(1, loss);
}
private:
private:
/*! \brief Number of data */
data_size_t num_data_;
/*! \brief Pointer of label */
......@@ -128,7 +128,7 @@ private:
/*! \brief L2 loss for multiclass task */
class MultiErrorMetric: public MulticlassMetric<MultiErrorMetric> {
public:
public:
explicit MultiErrorMetric(const Config& config) :MulticlassMetric<MultiErrorMetric>(config) {}
inline static double LossOnPoint(label_t label, std::vector<double>& score) {
......@@ -148,7 +148,7 @@ public:
/*! \brief Logloss for multiclass task */
class MultiSoftmaxLoglossMetric: public MulticlassMetric<MultiSoftmaxLoglossMetric> {
public:
public:
explicit MultiSoftmaxLoglossMetric(const Config& config) :MulticlassMetric<MultiSoftmaxLoglossMetric>(config) {}
inline static double LossOnPoint(label_t label, std::vector<double>& score) {
......
......@@ -13,7 +13,7 @@
namespace LightGBM {
class NDCGMetric:public Metric {
public:
public:
explicit NDCGMetric(const Config& config) {
// get eval position
eval_at_ = config.eval_at;
......@@ -143,7 +143,7 @@ public:
return result;
}
private:
private:
/*! \brief Number of data */
data_size_t num_data_;
/*! \brief Pointer of label */
......
......@@ -14,7 +14,7 @@ namespace LightGBM {
*/
template<typename PointWiseLossCalculator>
class RegressionMetric: public Metric {
public:
public:
explicit RegressionMetric(const Config& config) :config_(config) {
}
......@@ -95,7 +95,7 @@ public:
inline static void CheckLabel(label_t) {
}
private:
private:
/*! \brief Number of data */
data_size_t num_data_;
/*! \brief Pointer of label */
......@@ -111,7 +111,7 @@ private:
/*! \brief RMSE loss for regression task */
class RMSEMetric: public RegressionMetric<RMSEMetric> {
public:
public:
explicit RMSEMetric(const Config& config) :RegressionMetric<RMSEMetric>(config) {}
inline static double LossOnPoint(label_t label, double score, const Config&) {
......@@ -130,7 +130,7 @@ public:
/*! \brief L2 loss for regression task */
class L2Metric: public RegressionMetric<L2Metric> {
public:
public:
explicit L2Metric(const Config& config) :RegressionMetric<L2Metric>(config) {}
inline static double LossOnPoint(label_t label, double score, const Config&) {
......@@ -144,7 +144,7 @@ public:
/*! \brief L2 loss for regression task */
class QuantileMetric : public RegressionMetric<QuantileMetric> {
public:
public:
explicit QuantileMetric(const Config& config) :RegressionMetric<QuantileMetric>(config) {
}
......@@ -165,7 +165,7 @@ public:
/*! \brief L1 loss for regression task */
class L1Metric: public RegressionMetric<L1Metric> {
public:
public:
explicit L1Metric(const Config& config) :RegressionMetric<L1Metric>(config) {}
inline static double LossOnPoint(label_t label, double score, const Config&) {
......@@ -178,7 +178,7 @@ public:
/*! \brief Huber loss for regression task */
class HuberLossMetric: public RegressionMetric<HuberLossMetric> {
public:
public:
explicit HuberLossMetric(const Config& config) :RegressionMetric<HuberLossMetric>(config) {
}
......@@ -199,7 +199,7 @@ public:
/*! \brief Fair loss for regression task */
// http://research.microsoft.com/en-us/um/people/zhang/INRIA/Publis/Tutorial-Estim/node24.html
class FairLossMetric: public RegressionMetric<FairLossMetric> {
public:
public:
explicit FairLossMetric(const Config& config) :RegressionMetric<FairLossMetric>(config) {
}
......@@ -216,7 +216,7 @@ public:
/*! \brief Poisson regression loss for regression task */
class PoissonMetric: public RegressionMetric<PoissonMetric> {
public:
public:
explicit PoissonMetric(const Config& config) :RegressionMetric<PoissonMetric>(config) {
}
......@@ -235,7 +235,7 @@ public:
/*! \brief Mape regression loss for regression task */
class MAPEMetric : public RegressionMetric<MAPEMetric> {
public:
public:
explicit MAPEMetric(const Config& config) :RegressionMetric<MAPEMetric>(config) {
}
......@@ -248,7 +248,7 @@ public:
};
class GammaMetric : public RegressionMetric<GammaMetric> {
public:
public:
explicit GammaMetric(const Config& config) :RegressionMetric<GammaMetric>(config) {
}
......@@ -271,7 +271,7 @@ public:
class GammaDevianceMetric : public RegressionMetric<GammaDevianceMetric> {
public:
public:
explicit GammaDevianceMetric(const Config& config) :RegressionMetric<GammaDevianceMetric>(config) {
}
......@@ -292,7 +292,7 @@ public:
};
class TweedieMetric : public RegressionMetric<TweedieMetric> {
public:
public:
explicit TweedieMetric(const Config& config) :RegressionMetric<TweedieMetric>(config) {
}
......
......@@ -65,7 +65,7 @@ namespace LightGBM {
// CrossEntropyMetric : "xentropy" : (optional) weights are used linearly
//
class CrossEntropyMetric : public Metric {
public:
public:
explicit CrossEntropyMetric(const Config&) {}
virtual ~CrossEntropyMetric() {}
......@@ -142,7 +142,7 @@ public:
return -1.0f; // negative means smaller loss is better, positive means larger loss is better
}
private:
private:
/*! \brief Number of data points */
data_size_t num_data_;
/*! \brief Pointer to label */
......@@ -160,7 +160,7 @@ private:
// ATTENTION: Supposed to be used when the objective also is "xentlambda"
//
class CrossEntropyLambdaMetric : public Metric {
public:
public:
explicit CrossEntropyLambdaMetric(const Config&) {}
virtual ~CrossEntropyLambdaMetric() {}
......@@ -228,7 +228,7 @@ public:
return -1.0f;
}
private:
private:
/*! \brief Number of data points */
data_size_t num_data_;
/*! \brief Pointer to label */
......@@ -243,7 +243,7 @@ private:
// KullbackLeiblerDivergence : "kldiv" : (optional) weights are used linearly
//
class KullbackLeiblerDivergence : public Metric {
public:
public:
explicit KullbackLeiblerDivergence(const Config&) {}
virtual ~KullbackLeiblerDivergence() {}
......@@ -336,7 +336,7 @@ public:
return -1.0f;
}
private:
private:
/*! \brief Number of data points */
data_size_t num_data_;
/*! \brief Pointer to label */
......
Markdown is supported
Attach a file by drag &amp; drop or click to upload.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment