Unverified commit 50f11a9f authored by Nikita Titov, committed by GitHub

[ci][c++] fixed `whitespace/indent_namespace` errors from cpplint (#7056)



* dev

* dev

* dev

* dev

* dev

---------
Co-authored-by: James Lamb <jaylamb20@gmail.com>
parent 6f0d7cc2
@@ -28,7 +28,7 @@ repos:
       - id: cpplint
         args:
           - --recursive
-          - --filter=-build/include_subdir,-build/header_guard,-whitespace/indent_namespace,-whitespace/line_length
+          - --filter=-build/include_subdir,-build/header_guard,-whitespace/line_length
   - repo: local
     hooks:
       - id: check-omp-pragmas
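Dropping `-whitespace/indent_namespace` from the filter means the cpplint hook now enforces that check instead of silencing it. As a minimal illustration of what the check complains about (a made-up snippet, not taken from the LightGBM sources):

```cpp
// Illustrative only: cpplint's whitespace/indent_namespace check expects
// declarations at namespace scope to start without extra indentation.
namespace demo {

  int indented_value = 1;    // flagged by whitespace/indent_namespace

int unindented_value = 2;    // accepted: no indentation inside the namespace

}  // namespace demo
```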
@@ -83,7 +83,7 @@ const int kAlignedSize = 32;
 // Refer to https://docs.microsoft.com/en-us/cpp/error-messages/compiler-warnings/compiler-warning-level-4-c4127?view=vs-2019
 #ifdef _MSC_VER
   #pragma warning(disable : 4127)
 #endif
 }  // namespace LightGBM
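For context, the URL above documents MSVC warning C4127 ("conditional expression is constant"), a warning-level-4 diagnostic, which is why the suppression sits behind `#ifdef _MSC_VER`. A small self-contained sketch of the same suppression pattern (illustrative code, not from LightGBM; push/pop keeps the disable local to the code that needs it):

```cpp
#include <cstdio>

int main() {
#ifdef _MSC_VER
  #pragma warning(push)
  #pragma warning(disable : 4127)  // conditional expression is constant
#endif
  // A compile-time-constant condition like this typically triggers C4127 at /W4.
  if (sizeof(void*) == 8) {
    std::printf("64-bit pointers\n");
  } else {
    std::printf("32-bit pointers\n");
  }
#ifdef _MSC_VER
  #pragma warning(pop)
#endif
  return 0;
}
```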
@@ -28,10 +28,8 @@ namespace LightGBM {

 const int Dataset::kSerializedReferenceVersionLength = 2;
 const char* Dataset::serialized_reference_version = "v1";
-const char* Dataset::binary_file_token =
-    "______LightGBM_Binary_File_Token______\n";
-const char* Dataset::binary_serialized_reference_token =
-    "______LightGBM_Binary_Serialized_Token______\n";
+const char* Dataset::binary_file_token = "______LightGBM_Binary_File_Token______\n";
+const char* Dataset::binary_serialized_reference_token = "______LightGBM_Binary_Serialized_Token______\n";

 Dataset::Dataset() {
   data_filename_ = "noname";
@@ -12,8 +12,8 @@
 namespace LightGBM {

-CUDABinaryLoglossMetric::CUDABinaryLoglossMetric(const Config& config):
-  CUDABinaryMetricInterface<BinaryLoglossMetric, CUDABinaryLoglossMetric>(config) {}
+CUDABinaryLoglossMetric::CUDABinaryLoglossMetric(
+  const Config& config):CUDABinaryMetricInterface<BinaryLoglossMetric, CUDABinaryLoglossMetric>(config) {}

 template <typename HOST_METRIC, typename CUDA_METRIC>
 std::vector<double> CUDABinaryMetricInterface<HOST_METRIC, CUDA_METRIC>::Eval(const double* score, const ObjectiveFunction* objective) const {
@@ -30,40 +30,40 @@
(indentation-only hunk: these lines were indented inside `namespace LightGBM` before the change; their text is otherwise unchanged and reads as follows)

namespace LightGBM {

// label should be in interval [0, 1];
// prob should be in interval (0, 1); prob is clipped if needed
inline static double XentLoss(label_t label, double prob) {
  const double log_arg_epsilon = 1.0e-12;
  double a = label;
  if (prob > log_arg_epsilon) {
    a *= std::log(prob);
  } else {
    a *= std::log(log_arg_epsilon);
  }
  double b = 1.0f - label;
  if (1.0f - prob > log_arg_epsilon) {
    b *= std::log(1.0f - prob);
  } else {
    b *= std::log(log_arg_epsilon);
  }
  return - (a + b);
}

// hhat >(=) 0 assumed; and weight > 0 required; but not checked here
inline static double XentLambdaLoss(label_t label, label_t weight, double hhat) {
  return XentLoss(label, 1.0f - std::exp(-weight * hhat));
}

// Computes the (negative) entropy for label p; p should be in interval [0, 1];
// This is used to presum the KL-divergence offset term (to be _added_ to the cross-entropy loss).
// NOTE: x*log(x) = 0 for x=0,1; so only add when in (0, 1); avoid log(0)*0
inline static double YentLoss(double p) {
  double hp = 0.0;
  if (p > 0) hp += p * std::log(p);
  double q = 1.0f - p;
  if (q > 0) hp += q * std::log(q);
  return hp;
}

//
// CrossEntropyMetric : "xentropy" : (optional) weights are used linearly
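Since the hunk above only re-indents these helpers, their behavior is unchanged: `XentLoss` is the usual binary cross-entropy -(label*log(prob) + (1-label)*log(1-prob)) with log arguments clipped at 1e-12, `XentLambdaLoss` evaluates it at prob = 1 - exp(-weight * hhat), and `YentLoss` returns p*log(p) + (1-p)*log(1-p), the (negative) entropy offset added back to the cross-entropy loss. A small standalone sketch that exercises them (the helpers are copied from the hunk; `label_t` is assumed to be `float` here, and the `main` driver is illustrative):

```cpp
#include <cmath>
#include <cstdio>

typedef float label_t;  // assumption: LightGBM's label_t defaults to float

// Helpers copied from the hunk above.
inline static double XentLoss(label_t label, double prob) {
  const double log_arg_epsilon = 1.0e-12;
  double a = label;
  if (prob > log_arg_epsilon) {
    a *= std::log(prob);
  } else {
    a *= std::log(log_arg_epsilon);
  }
  double b = 1.0f - label;
  if (1.0f - prob > log_arg_epsilon) {
    b *= std::log(1.0f - prob);
  } else {
    b *= std::log(log_arg_epsilon);
  }
  return - (a + b);
}

inline static double XentLambdaLoss(label_t label, label_t weight, double hhat) {
  return XentLoss(label, 1.0f - std::exp(-weight * hhat));
}

inline static double YentLoss(double p) {
  double hp = 0.0;
  if (p > 0) hp += p * std::log(p);
  double q = 1.0f - p;
  if (q > 0) hp += q * std::log(q);
  return hp;
}

int main() {
  // -(1*log(0.8) + 0*log(0.2)) = 0.2231...
  std::printf("XentLoss(1, 0.8)        = %.4f\n", XentLoss(1.0f, 0.8));
  // log argument clipped at 1e-12 instead of evaluating log(0)
  std::printf("XentLoss(1, 0.0)        = %.4f\n", XentLoss(1.0f, 0.0));
  // equals XentLoss(1, 1 - exp(-2)) = XentLoss(1, 0.8647) = 0.1454...
  std::printf("XentLambdaLoss(1, 2, 1) = %.4f\n", XentLambdaLoss(1.0f, 2.0f, 1.0));
  // 0.5*log(0.5) + 0.5*log(0.5) = -log(2) = -0.6931...
  std::printf("YentLoss(0.5)           = %.4f\n", YentLoss(0.5));
  return 0;
}
```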
@@ -12,8 +12,7 @@
 namespace LightGBM {

 template <typename TREELEARNER_T>
-DataParallelTreeLearner<TREELEARNER_T>::DataParallelTreeLearner(const Config* config)
-  :TREELEARNER_T(config) {
+DataParallelTreeLearner<TREELEARNER_T>::DataParallelTreeLearner(const Config* config):TREELEARNER_T(config) {
 }

 template <typename TREELEARNER_T>
@@ -384,8 +384,11 @@ void FeatureHistogram::FindBestThresholdCategoricalInner(double sum_gradient,
   }
 }

-template <bool USE_RAND, bool USE_MC, bool USE_L1, bool USE_MAX_OUTPUT, bool USE_SMOOTHING, typename PACKED_HIST_BIN_T, typename PACKED_HIST_ACC_T,
-  typename HIST_BIN_T, typename HIST_ACC_T, int HIST_BITS_BIN, int HIST_BITS_ACC>
+template <
+  bool USE_RAND, bool USE_MC, bool USE_L1, bool USE_MAX_OUTPUT, bool USE_SMOOTHING,
+  typename PACKED_HIST_BIN_T, typename PACKED_HIST_ACC_T, typename HIST_BIN_T, typename HIST_ACC_T,
+  int HIST_BITS_BIN, int HIST_BITS_ACC
+>
 void FeatureHistogram::FindBestThresholdCategoricalIntInner(int64_t int_sum_gradient_and_hessian,
   const double grad_scale, const double hess_scale,
   data_size_t num_data,
@@ -11,8 +11,7 @@ namespace LightGBM {

 template <typename TREELEARNER_T>
-FeatureParallelTreeLearner<TREELEARNER_T>::FeatureParallelTreeLearner(const Config* config)
-  :TREELEARNER_T(config) {
+FeatureParallelTreeLearner<TREELEARNER_T>::FeatureParallelTreeLearner(const Config* config):TREELEARNER_T(config) {
 }

 template <typename TREELEARNER_T>
@@ -15,8 +15,7 @@
 namespace LightGBM {

 template <typename TREELEARNER_T>
-VotingParallelTreeLearner<TREELEARNER_T>::VotingParallelTreeLearner(const Config* config)
-  :TREELEARNER_T(config) {
+VotingParallelTreeLearner<TREELEARNER_T>::VotingParallelTreeLearner(const Config* config):TREELEARNER_T(config) {
   top_k_ = this->config_->top_k;
 }