Commit 3f4ef95b authored by Guolin Ke

fix warning

parent fb732c34
@@ -74,9 +74,9 @@ public:
 protected:
   /*! \brief delta for Huber loss */
-  double huber_delta_;
+  score_t huber_delta_;
   /*! \brief c for Fair loss */
-  double fair_c_;
+  score_t fair_c_;
 private:
   /*! \brief Number of data */
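The warning being fixed is an implicit double-to-float narrowing: the members change from double to score_t, and assignments from the config gain explicit static_casts. A minimal sketch of why, assuming score_t is a typedef for float (as in LightGBM's meta.h) and using a hypothetical stand-in for the real MetricConfig:

#include <cstdio>

// Assumption: LightGBM typedefs score_t to float (include/LightGBM/meta.h);
// this sketch mirrors that to show the narrowing the casts silence.
typedef float score_t;

// Hypothetical stand-in for the real MetricConfig.
struct MetricConfig { double huber_delta; };

int main() {
  MetricConfig config{1.0};
  // double -> float narrows; without the explicit cast, compilers warn
  // (e.g. MSVC C4244 "possible loss of data", GCC/Clang -Wconversion).
  score_t huber_delta = static_cast<score_t>(config.huber_delta);
  std::printf("delta = %f\n", huber_delta);
  return 0;
}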
@@ -127,15 +127,15 @@ public:
 class HuberLossMetric: public RegressionMetric<HuberLossMetric> {
 public:
   explicit HuberLossMetric(const MetricConfig& config) :RegressionMetric<HuberLossMetric>(config) {
-    huber_delta_ = config.huber_delta;
+    huber_delta_ = static_cast<score_t>(config.huber_delta);
   }
   inline static score_t LossOnPoint(float label, score_t score, float delta, float) {
-    const double diff = score - label;
+    const score_t diff = score - label;
     if (std::abs(diff) <= delta) {
-      return 0.5 * diff * diff;
+      return 0.5f * diff * diff;
     } else {
-      return delta * (std::abs(diff) - 0.5 * delta);
+      return delta * (std::abs(diff) - 0.5f * delta);
     }
   }
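For reference, the Huber loss computed by LossOnPoint is quadratic for residuals within delta and linear beyond it, which limits the influence of outliers while staying continuous at the boundary. A self-contained sketch of the same pointwise formula (score_t-as-float is an assumption, as above):

#include <cmath>
#include <cstdio>

typedef float score_t;  // assumption: mirrors LightGBM's float typedef

// Pointwise Huber loss: quadratic near zero, linear in the tails.
// Both branches agree at |diff| == delta: 0.5*delta^2.
score_t HuberLoss(float label, score_t score, score_t delta) {
  const score_t diff = score - label;
  if (std::fabs(diff) <= delta) {
    return 0.5f * diff * diff;                        // quadratic region
  }
  return delta * (std::fabs(diff) - 0.5f * delta);    // linear region
}

int main() {
  // 0.125 (inside the band) and 2.5 (linear tail) for delta = 1.
  std::printf("%f %f\n", HuberLoss(0.0f, 0.5f, 1.0f), HuberLoss(0.0f, 3.0f, 1.0f));
  return 0;
}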
@@ -149,12 +149,12 @@ public:
 class FairLossMetric: public RegressionMetric<FairLossMetric> {
 public:
   explicit FairLossMetric(const MetricConfig& config) :RegressionMetric<FairLossMetric>(config) {
-    fair_c_ = config.fair_c;
+    fair_c_ = static_cast<score_t>(config.fair_c);
   }
   inline static score_t LossOnPoint(float label, score_t score, float, float c) {
-    const double x = std::abs(score - label);
-    return c * x - c * c * std::log(1.0 + x / c);
+    const score_t x = std::fabs(score - label);
+    return c * x - c * c * std::log(1.0f + x / c);
   }
   inline static const char* Name() {
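The Fair loss is a smooth, L1-like loss whose influence saturates for large residuals. A standalone sketch of the pointwise formula used by LossOnPoint (again assuming score_t is float):

#include <cmath>
#include <cstdio>

typedef float score_t;  // assumption: mirrors LightGBM's float typedef

// Pointwise Fair loss: c*|x| - c^2 * log(1 + |x|/c), where x is the
// residual; its gradient saturates at +/-c as |x| grows.
score_t FairLoss(float label, score_t score, score_t c) {
  const score_t x = std::fabs(score - label);
  return c * x - c * c * std::log(1.0f + x / c);
}

int main() {
  std::printf("%f\n", FairLoss(0.0f, 2.0f, 1.0f));  // 2 - log(3) ~= 0.9014
  return 0;
}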
@@ -25,13 +25,13 @@ public:
   void GetGradients(const score_t* score, score_t* gradients,
                     score_t* hessians) const override {
     if (weights_ == nullptr) {
       #pragma omp parallel for schedule(static)
       for (data_size_t i = 0; i < num_data_; ++i) {
         gradients[i] = (score[i] - label_[i]);
         hessians[i] = 1.0;
       }
     } else {
       #pragma omp parallel for schedule(static)
       for (data_size_t i = 0; i < num_data_; ++i) {
         gradients[i] = (score[i] - label_[i]) * weights_[i];
         hessians[i] = weights_[i];
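This hunk is the plain L2 objective: for 0.5*(s - y)^2 the gradient is (s - y) and the second derivative is exactly 1, each scaled by the row weight in the weighted branch. A minimal sketch of the per-point computation:

#include <cstdio>

typedef float score_t;  // assumption: mirrors LightGBM's float typedef

// L2 loss 0.5*(s - y)^2: first derivative w.r.t. s is (s - y),
// second derivative is exactly 1, matching the loops above.
void L2GradHess(float label, score_t score, score_t* grad, score_t* hess) {
  *grad = score - label;
  *hess = 1.0f;
}

int main() {
  score_t g, h;
  L2GradHess(3.0f, 2.5f, &g, &h);
  std::printf("grad=%f hess=%f\n", g, h);  // grad=-0.5 hess=1
  return 0;
}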
@@ -52,10 +52,12 @@ private:
   const float* weights_;
 };
+/*!
+* \brief L1 regression loss
+*/
 class RegressionL1loss: public ObjectiveFunction {
 public:
-  explicit RegressionL1loss(const ObjectiveConfig& config) {}
+  explicit RegressionL1loss(const ObjectiveConfig&) {}
   ~RegressionL1loss() {}
@@ -68,26 +70,26 @@ public:
   void GetGradients(const score_t* score, score_t* gradients,
                     score_t* hessians) const override {
     if (weights_ == nullptr) {
       #pragma omp parallel for schedule(static)
       for (data_size_t i = 0; i < num_data_; ++i) {
-        const double diff = score[i] - label_[i];
-        if (diff >= 0.0) {
-          gradients[i] = 1.0;
+        const score_t diff = score[i] - label_[i];
+        if (diff >= 0.0f) {
+          gradients[i] = 1.0f;
         } else {
-          gradients[i] = -1.0;
+          gradients[i] = -1.0f;
         }
-        hessians[i] = Common::ApproximateHessianWithGaussian(score[i], label_[i], gradients[i]);
+        hessians[i] = static_cast<score_t>(Common::ApproximateHessianWithGaussian(score[i], label_[i], gradients[i]));
       }
     } else {
       #pragma omp parallel for schedule(static)
       for (data_size_t i = 0; i < num_data_; ++i) {
-        const double diff = score[i] - label_[i];
-        if (diff >= 0.0) {
+        const score_t diff = score[i] - label_[i];
+        if (diff >= 0.0f) {
           gradients[i] = weights_[i];
         } else {
           gradients[i] = -weights_[i];
         }
-        hessians[i] = Common::ApproximateHessianWithGaussian(score[i], label_[i], gradients[i], weights_[i]);
+        hessians[i] = static_cast<score_t>(Common::ApproximateHessianWithGaussian(score[i], label_[i], gradients[i], weights_[i]));
       }
     }
   }
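The L1 objective's gradient is sign(s - y), but |s - y| has no usable second derivative (it is 0 almost everywhere), so the code substitutes Common::ApproximateHessianWithGaussian. The exact formula of that helper is not shown in this diff, so the sketch below uses a clearly marked constant placeholder instead:

#include <cstdio>

typedef float score_t;  // assumption: mirrors LightGBM's float typedef

// L1 loss |s - y|: gradient is sign(s - y); the true hessian is 0 almost
// everywhere, so the real code plugs in a Gaussian-based approximation.
void L1GradHess(float label, score_t score, score_t* grad, score_t* hess) {
  const score_t diff = score - label;
  *grad = (diff >= 0.0f) ? 1.0f : -1.0f;
  *hess = 1.0f;  // placeholder, NOT the real ApproximateHessianWithGaussian
}

int main() {
  score_t g, h;
  L1GradHess(1.0f, 0.0f, &g, &h);
  std::printf("grad=%f hess=%f\n", g, h);  // grad=-1
  return 0;
}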
@@ -105,11 +107,13 @@ private:
   const float* weights_;
 };
+/*!
+* \brief Huber regression loss
+*/
 class RegressionHuberLoss: public ObjectiveFunction {
 public:
   explicit RegressionHuberLoss(const ObjectiveConfig& config) {
-    delta_ = config.huber_delta;
+    delta_ = static_cast<score_t>(config.huber_delta);
   }
   ~RegressionHuberLoss() {
@@ -124,37 +128,37 @@ public:
   void GetGradients(const score_t* score, score_t* gradients,
                     score_t* hessians) const override {
     if (weights_ == nullptr) {
       #pragma omp parallel for schedule(static)
       for (data_size_t i = 0; i < num_data_; ++i) {
-        const double diff = score[i] - label_[i];
+        const score_t diff = score[i] - label_[i];
         if (std::abs(diff) <= delta_) {
           gradients[i] = diff;
-          hessians[i] = 1.0;
+          hessians[i] = 1.0f;
         } else {
-          if (diff >= 0.0) {
+          if (diff >= 0.0f) {
            gradients[i] = delta_;
           } else {
            gradients[i] = -delta_;
           }
-          hessians[i] = Common::ApproximateHessianWithGaussian(score[i], label_[i], gradients[i]);
+          hessians[i] = static_cast<score_t>(Common::ApproximateHessianWithGaussian(score[i], label_[i], gradients[i]));
         }
       }
     } else {
       #pragma omp parallel for schedule(static)
       for (data_size_t i = 0; i < num_data_; ++i) {
-        const double diff = score[i] - label_[i];
+        const score_t diff = score[i] - label_[i];
         if (std::abs(diff) <= delta_) {
           gradients[i] = diff * weights_[i];
           hessians[i] = weights_[i];
         } else {
-          if (diff >= 0.0) {
+          if (diff >= 0.0f) {
            gradients[i] = delta_ * weights_[i];
           } else {
            gradients[i] = -delta_ * weights_[i];
           }
-          hessians[i] = Common::ApproximateHessianWithGaussian(score[i], label_[i], gradients[i], weights_[i]);
+          hessians[i] = static_cast<score_t>(Common::ApproximateHessianWithGaussian(score[i], label_[i], gradients[i], weights_[i]));
         }
       }
     }
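The Huber gradient matches L2 inside the delta band (grad = diff, hess = 1) and is clipped to +/-delta outside it, where the objective again falls back to the Gaussian hessian approximation. A sketch with the same placeholder caveat as above:

#include <cmath>
#include <cstdio>

typedef float score_t;  // assumption: mirrors LightGBM's float typedef

// Huber gradient: L2-like inside the band, clipped to +/-delta outside.
void HuberGradHess(float label, score_t score, score_t delta,
                   score_t* grad, score_t* hess) {
  const score_t diff = score - label;
  if (std::fabs(diff) <= delta) {
    *grad = diff;
    *hess = 1.0f;
  } else {
    *grad = (diff >= 0.0f) ? delta : -delta;
    *hess = 1.0f;  // placeholder for Common::ApproximateHessianWithGaussian
  }
}

int main() {
  score_t g, h;
  HuberGradHess(0.0f, 2.0f, 1.0f, &g, &h);
  std::printf("grad=%f hess=%f\n", g, h);  // grad=1 (clipped to delta)
  return 0;
}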
@@ -172,7 +176,7 @@ private:
   /*! \brief Pointer of weights */
   const float* weights_;
   /*! \brief delta for Huber loss */
-  double delta_;
+  score_t delta_;
 };
@@ -180,7 +184,7 @@ private:
 class RegressionFairLoss: public ObjectiveFunction {
 public:
   explicit RegressionFairLoss(const ObjectiveConfig& config) {
-    c_ = config.fair_c;
+    c_ = static_cast<score_t>(config.fair_c);
   }
   ~RegressionFairLoss() {}
@@ -194,16 +198,16 @@ public:
   void GetGradients(const score_t* score, score_t* gradients,
                     score_t* hessians) const override {
     if (weights_ == nullptr) {
       #pragma omp parallel for schedule(static)
       for (data_size_t i = 0; i < num_data_; ++i) {
-        const double x = score[i] - label_[i];
+        const score_t x = score[i] - label_[i];
         gradients[i] = c_ * x / (std::fabs(x) + c_);
         hessians[i] = c_ * c_ / ((std::fabs(x) + c_) * (std::fabs(x) + c_));
       }
     } else {
       #pragma omp parallel for schedule(static)
       for (data_size_t i = 0; i < num_data_; ++i) {
-        const double x = score[i] - label_[i];
+        const score_t x = score[i] - label_[i];
         gradients[i] = c_ * x / (std::fabs(x) + c_);
         gradients[i] *= weights_[i];
         hessians[i] = c_ * c_ / ((std::fabs(x) + c_) * (std::fabs(x) + c_));
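Unlike L1 and Huber, the Fair loss has exact closed-form derivatives, which is what these loops compute: differentiating c*|x| - c^2*log(1 + |x|/c) once gives grad = c*x/(|x| + c), and twice gives hess = c^2/(|x| + c)^2. A standalone sketch:

#include <cmath>
#include <cstdio>

typedef float score_t;  // assumption: mirrors LightGBM's float typedef

// Exact derivatives of the Fair loss c*|x| - c^2*log(1 + |x|/c):
//   grad = c*x / (|x| + c)     (saturates at +/-c)
//   hess = c^2 / (|x| + c)^2   (always positive, decays for large residuals)
void FairGradHess(float label, score_t score, score_t c,
                  score_t* grad, score_t* hess) {
  const score_t x = score - label;
  *grad = c * x / (std::fabs(x) + c);
  *hess = c * c / ((std::fabs(x) + c) * (std::fabs(x) + c));
}

int main() {
  score_t g, h;
  FairGradHess(0.0f, 3.0f, 1.0f, &g, &h);
  std::printf("grad=%f hess=%f\n", g, h);  // grad=0.75 hess=0.0625
  return 0;
}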
@@ -224,7 +228,7 @@ private:
   /*! \brief Pointer of weights */
   const float* weights_;
   /*! \brief c for Fair loss */
-  double c_;
+  score_t c_;
 };
 } // namespace LightGBM