"vscode:/vscode.git/clone" did not exist on "7689a4d0647b07cc3357e28e3eb85ae24d7e919a"
Commit 462612b4 authored by Nikita Titov's avatar Nikita Titov Committed by Guolin Ke
Browse files

fixed modifiers indent (#1997)

parent 8e286b38
......@@ -32,7 +32,7 @@ namespace LightGBM {
* This class will wrap all linkers to other machines if needs
*/
class Linkers {
public:
public:
Linkers() {
is_init_ = false;
}
......@@ -136,7 +136,7 @@ public:
#endif // USE_SOCKET
private:
private:
/*! \brief Rank of local machine */
int rank_;
/*! \brief Total number machines */
......
......@@ -86,7 +86,7 @@ const bool kNoDelay = true;
}
class TcpSocket {
public:
public:
TcpSocket() {
sockfd_ = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
if (sockfd_ == INVALID_SOCKET) {
......@@ -291,7 +291,7 @@ public:
}
}
private:
private:
SOCKET sockfd_;
};
......
......@@ -11,7 +11,7 @@ namespace LightGBM {
* \brief Objective function for binary classification
*/
class BinaryLogloss: public ObjectiveFunction {
public:
public:
explicit BinaryLogloss(const Config& config, std::function<bool(label_t)> is_pos = nullptr) {
sigmoid_ = static_cast<double>(config.sigmoid);
if (sigmoid_ <= 0.0) {
......@@ -172,7 +172,7 @@ public:
bool NeedAccuratePrediction() const override { return false; }
private:
private:
/*! \brief Number of data */
data_size_t num_data_;
/*! \brief Pointer of label */
......
......@@ -14,7 +14,7 @@ namespace LightGBM {
* \brief Objective function for multiclass classification, use softmax as objective functions
*/
class MulticlassSoftmax: public ObjectiveFunction {
public:
public:
explicit MulticlassSoftmax(const Config& config) {
num_class_ = config.num_class;
}
......@@ -146,7 +146,7 @@ public:
}
}
private:
private:
/*! \brief Number of data */
data_size_t num_data_;
/*! \brief Number of classes */
......@@ -164,7 +164,7 @@ private:
* \brief Objective function for multiclass classification, use one-vs-all binary objective function
*/
class MulticlassOVA: public ObjectiveFunction {
public:
public:
explicit MulticlassOVA(const Config& config) {
num_class_ = config.num_class;
for (int i = 0; i < num_class_; ++i) {
......@@ -246,7 +246,7 @@ public:
return binary_loss_[class_id]->ClassNeedTrain(0);
}
private:
private:
/*! \brief Number of data */
data_size_t num_data_;
/*! \brief Number of classes */
......
......@@ -17,7 +17,7 @@ namespace LightGBM {
* \brief Objective function for Lambdrank with NDCG
*/
class LambdarankNDCG: public ObjectiveFunction {
public:
public:
explicit LambdarankNDCG(const Config& config) {
sigmoid_ = static_cast<double>(config.sigmoid);
label_gain_ = config.label_gain;
......@@ -205,7 +205,7 @@ public:
bool NeedAccuratePrediction() const override { return false; }
private:
private:
/*! \brief Gains for labels */
std::vector<double> label_gain_;
/*! \brief Cache inverse max DCG, speed up calculation */
......
......@@ -69,7 +69,7 @@ namespace LightGBM {
* \brief Objective function for regression
*/
class RegressionL2loss: public ObjectiveFunction {
public:
public:
explicit RegressionL2loss(const Config& config) {
sqrt_ = config.reg_sqrt;
}
......@@ -165,7 +165,7 @@ public:
return suml / sumw;
}
protected:
protected:
bool sqrt_;
/*! \brief Number of data */
data_size_t num_data_;
......@@ -180,7 +180,7 @@ protected:
* \brief L1 regression loss
*/
class RegressionL1loss: public RegressionL2loss {
public:
public:
explicit RegressionL1loss(const Config& config): RegressionL2loss(config) {
}
......@@ -298,7 +298,7 @@ public:
* \brief Huber regression loss
*/
class RegressionHuberLoss: public RegressionL2loss {
public:
public:
explicit RegressionHuberLoss(const Config& config): RegressionL2loss(config) {
alpha_ = static_cast<double>(config.alpha);
if (sqrt_) {
......@@ -352,7 +352,7 @@ public:
return false;
}
private:
private:
/*! \brief delta for Huber loss */
double alpha_;
};
......@@ -360,7 +360,7 @@ private:
// http://research.microsoft.com/en-us/um/people/zhang/INRIA/Publis/Tutorial-Estim/node24.html
class RegressionFairLoss: public RegressionL2loss {
public:
public:
explicit RegressionFairLoss(const Config& config): RegressionL2loss(config) {
c_ = static_cast<double>(config.fair_c);
}
......@@ -397,7 +397,7 @@ public:
return false;
}
private:
private:
/*! \brief c for Fair loss */
double c_;
};
......@@ -407,7 +407,7 @@ private:
* \brief Objective function for Poisson regression
*/
class RegressionPoissonLoss: public RegressionL2loss {
public:
public:
explicit RegressionPoissonLoss(const Config& config): RegressionL2loss(config) {
max_delta_step_ = static_cast<double>(config.poisson_max_delta_step);
if (sqrt_) {
......@@ -481,13 +481,13 @@ public:
return false;
}
private:
private:
/*! \brief used to safeguard optimization */
double max_delta_step_;
};
class RegressionQuantileloss : public RegressionL2loss {
public:
public:
explicit RegressionQuantileloss(const Config& config): RegressionL2loss(config) {
alpha_ = static_cast<score_t>(config.alpha);
CHECK(alpha_ > 0 && alpha_ < 1);
......@@ -607,7 +607,7 @@ public:
}
}
private:
private:
score_t alpha_;
};
......@@ -616,7 +616,7 @@ private:
* \brief Mape Regression Loss
*/
class RegressionMAPELOSS : public RegressionL1loss {
public:
public:
explicit RegressionMAPELOSS(const Config& config) : RegressionL1loss(config) {
}
......@@ -725,7 +725,7 @@ public:
return true;
}
private:
private:
std::vector<label_t> label_weight_;
};
......@@ -735,7 +735,7 @@ private:
* \brief Objective function for Gamma regression
*/
class RegressionGammaLoss : public RegressionPoissonLoss {
public:
public:
explicit RegressionGammaLoss(const Config& config) : RegressionPoissonLoss(config) {
}
......@@ -770,7 +770,7 @@ public:
* \brief Objective function for Tweedie regression
*/
class RegressionTweedieLoss: public RegressionPoissonLoss {
public:
public:
explicit RegressionTweedieLoss(const Config& config) : RegressionPoissonLoss(config) {
rho_ = config.tweedie_variance_power;
}
......@@ -803,7 +803,7 @@ public:
return "tweedie";
}
private:
private:
double rho_;
};
......
......@@ -36,7 +36,7 @@ namespace LightGBM {
* \brief Objective function for cross-entropy (with optional linear weights)
*/
class CrossEntropy: public ObjectiveFunction {
public:
public:
explicit CrossEntropy(const Config&) {
}
......@@ -127,7 +127,7 @@ public:
return initscore;
}
private:
private:
/*! \brief Number of data points */
data_size_t num_data_;
/*! \brief Pointer for label */
......@@ -140,7 +140,7 @@ private:
* \brief Objective function for alternative parameterization of cross-entropy (see top of file for explanation)
*/
class CrossEntropyLambda: public ObjectiveFunction {
public:
public:
explicit CrossEntropyLambda(const Config&) {
min_weight_ = max_weight_ = 0.0f;
}
......
......@@ -15,7 +15,7 @@ namespace LightGBM {
* \brief DataPartition is used to store the the partition of data on tree.
*/
class DataPartition {
public:
public:
DataPartition(data_size_t num_data, int num_leaves)
:num_data_(num_data), num_leaves_(num_leaves) {
leaf_begin_.resize(num_leaves_);
......@@ -188,7 +188,7 @@ public:
/*! \brief Get number of leaves */
int num_leaves() const { return num_leaves_; }
private:
private:
/*! \brief Number of all data */
data_size_t num_data_;
/*! \brief Number of all leaves */
......
......@@ -12,7 +12,7 @@
namespace LightGBM {
class FeatureMetainfo {
public:
public:
int num_bin;
MissingType missing_type;
int8_t bias = 0;
......@@ -27,7 +27,7 @@ public:
* \brief FeatureHistogram is used to construct and store a histogram for a feature.
*/
class FeatureHistogram {
public:
public:
FeatureHistogram() {
data_ = nullptr;
}
......@@ -449,7 +449,7 @@ public:
}
}
private:
private:
static double GetSplitGains(double sum_left_gradients, double sum_left_hessians,
double sum_right_gradients, double sum_right_hessians,
double l1, double l2, double max_delta_step,
......@@ -644,7 +644,7 @@ private:
std::function<void(double, double, data_size_t, double, double, SplitInfo*)> find_best_threshold_fun_;
};
class HistogramPool {
public:
public:
/*!
* \brief Constructor
*/
......@@ -804,7 +804,7 @@ public:
inverse_mapper_[slot] = dst_idx;
}
private:
private:
std::vector<std::unique_ptr<FeatureHistogram[]>> pool_;
std::vector<std::vector<HistogramBinEntry>> data_;
std::vector<FeatureMetainfo> feature_metas_;
......
......@@ -36,7 +36,7 @@ namespace LightGBM {
* \brief GPU-based parallel learning algorithm.
*/
class GPUTreeLearner: public SerialTreeLearner {
public:
public:
explicit GPUTreeLearner(const Config* tree_config);
~GPUTreeLearner();
void Init(const Dataset* train_data, bool is_constant_hessian) override;
......@@ -57,14 +57,14 @@ public:
use_bagging_ = false;
}
protected:
protected:
void BeforeTrain() override;
bool BeforeFindBestSplit(const Tree* tree, int left_leaf, int right_leaf) override;
void FindBestSplits() override;
void Split(Tree* tree, int best_Leaf, int* left_leaf, int* right_leaf) override;
void ConstructHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract) override;
private:
private:
/*! \brief 4-byte feature tuple used by GPU kernels */
struct Feature4 {
uint8_t s[4];
......@@ -269,7 +269,7 @@ private:
namespace LightGBM {
class GPUTreeLearner: public SerialTreeLearner {
public:
public:
#pragma warning(disable : 4702)
explicit GPUTreeLearner(const Config* tree_config) : SerialTreeLearner(tree_config) {
Log::Fatal("GPU Tree Learner was not enabled in this build.\n"
......
......@@ -14,7 +14,7 @@ namespace LightGBM {
* \brief used to find split candidates for a leaf
*/
class LeafSplits {
public:
public:
LeafSplits(data_size_t num_data)
:num_data_in_leaf_(num_data), num_data_(num_data),
data_indices_(nullptr) {
......@@ -141,7 +141,7 @@ public:
const data_size_t* data_indices() const { return data_indices_; }
private:
private:
/*! \brief current leaf index */
int leaf_index_;
/*! \brief number of data on current leaf */
......
......@@ -20,15 +20,16 @@ namespace LightGBM {
*/
template <typename TREELEARNER_T>
class FeatureParallelTreeLearner: public TREELEARNER_T {
public:
public:
explicit FeatureParallelTreeLearner(const Config* config);
~FeatureParallelTreeLearner();
void Init(const Dataset* train_data, bool is_constant_hessian) override;
protected:
protected:
void BeforeTrain() override;
void FindBestSplitsFromHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract) override;
private:
private:
/*! \brief rank of local machine */
int rank_;
/*! \brief Number of machines of this parallel task */
......@@ -46,13 +47,13 @@ private:
*/
template <typename TREELEARNER_T>
class DataParallelTreeLearner: public TREELEARNER_T {
public:
public:
explicit DataParallelTreeLearner(const Config* config);
~DataParallelTreeLearner();
void Init(const Dataset* train_data, bool is_constant_hessian) override;
void ResetConfig(const Config* config) override;
protected:
protected:
void BeforeTrain() override;
void FindBestSplits() override;
void FindBestSplitsFromHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract) override;
......@@ -66,7 +67,7 @@ protected:
}
}
private:
private:
/*! \brief Rank of local machine */
int rank_;
/*! \brief Number of machines of this parallel task */
......@@ -100,13 +101,13 @@ private:
*/
template <typename TREELEARNER_T>
class VotingParallelTreeLearner: public TREELEARNER_T {
public:
public:
explicit VotingParallelTreeLearner(const Config* config);
~VotingParallelTreeLearner() { }
void Init(const Dataset* train_data, bool is_constant_hessian) override;
void ResetConfig(const Config* config) override;
protected:
protected:
void BeforeTrain() override;
bool BeforeFindBestSplit(const Tree* tree, int left_leaf, int right_leaf) override;
void FindBestSplits() override;
......@@ -136,7 +137,7 @@ protected:
void CopyLocalHistogram(const std::vector<int>& smaller_top_features,
const std::vector<int>& larger_top_features);
private:
private:
/*! \brief Tree config used in local mode */
Config local_config_;
/*! \brief Voting size */
......
......@@ -32,7 +32,7 @@ namespace LightGBM {
* \brief Used for learning a tree by single machine
*/
class SerialTreeLearner: public TreeLearner {
public:
public:
explicit SerialTreeLearner(const Config* config);
~SerialTreeLearner();
......@@ -75,7 +75,7 @@ public:
void RenewTreeOutput(Tree* tree, const ObjectiveFunction* obj, double prediction,
data_size_t total_num_data, const data_size_t* bag_indices, data_size_t bag_cnt) const override;
protected:
protected:
/*!
* \brief Some initial works before training
*/
......
......@@ -15,7 +15,7 @@ namespace LightGBM {
* \brief Used to store some information for gain split point
*/
struct SplitInfo {
public:
public:
/*! \brief Feature index */
int feature = -1;
/*! \brief Split threshold */
......@@ -188,7 +188,7 @@ public:
};
struct LightSplitInfo {
public:
public:
/*! \brief Feature index */
int feature = -1;
/*! \brief Split gain */
......
Markdown is supported
Attach a file by drag &amp; drop or click to upload.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment