Commit 3f4ef95b authored by Guolin Ke

fix warning

parent fb732c34
@@ -74,9 +74,9 @@ public:
 protected:
   /*! \brief delta for Huber loss */
-  double huber_delta_;
+  score_t huber_delta_;
   /*! \brief c for Fair loss */
-  double fair_c_;
+  score_t fair_c_;
 private:
   /*! \brief Number of data */
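
Note: the warning being fixed here is the implicit narrowing conversion from the config's double-precision fields into score_t. A minimal sketch of the pattern, assuming score_t is LightGBM's default `typedef float score_t;` and conversion warnings are enabled (e.g. -Wconversion on GCC/Clang or C4244 on MSVC):

    typedef float score_t;  // assumption: LightGBM's default score type

    int main() {
      double huber_delta = 1.0;                       // config fields stay double
      score_t a = huber_delta;                        // implicit double -> float: warning
      score_t b = static_cast<score_t>(huber_delta);  // explicit cast: warning-free
      (void)a; (void)b;
      return 0;
    }

Storing the members as score_t and casting once in the constructor keeps the hot loops free of repeated conversions; the related fix further down drops an unused parameter name in RegressionL1loss's constructor to silence an unused-parameter warning.
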
@@ -126,40 +126,40 @@ public:
 /*! \brief Huber loss for regression task */
 class HuberLossMetric: public RegressionMetric<HuberLossMetric> {
 public:
-  explicit HuberLossMetric(const MetricConfig& config) :RegressionMetric<HuberLossMetric>(config) {
-    huber_delta_ = config.huber_delta;
-  }
+  explicit HuberLossMetric(const MetricConfig& config) :RegressionMetric<HuberLossMetric>(config) {
+    huber_delta_ = static_cast<score_t>(config.huber_delta);
+  }
-  inline static score_t LossOnPoint(float label, score_t score, float delta, float) {
-    const double diff = score - label;
-    if (std::abs(diff) <= delta) {
-      return 0.5 * diff * diff;
-    } else {
-      return delta * (std::abs(diff) - 0.5 * delta);
-    }
+  inline static score_t LossOnPoint(float label, score_t score, float delta, float) {
+    const score_t diff = score - label;
+    if (std::abs(diff) <= delta) {
+      return 0.5f * diff * diff;
+    } else {
+      return delta * (std::abs(diff) - 0.5f * delta);
+    }
   }
-  inline static const char* Name() {
-    return "huber";
-  }
+  inline static const char* Name() {
+    return "huber";
+  }
 };
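
For reference, LossOnPoint above computes the standard Huber loss with d = score - label:

    $$ L_\delta(d) = \begin{cases} \tfrac{1}{2} d^2, & |d| \le \delta \\ \delta\,\bigl(|d| - \tfrac{1}{2}\delta\bigr), & \text{otherwise,} \end{cases} $$

and the two branches agree at |d| = delta (both give delta^2 / 2), so the metric stays continuous at the cutover.
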
 /*! \brief Fair loss for regression task */
 // http://research.microsoft.com/en-us/um/people/zhang/INRIA/Publis/Tutorial-Estim/node24.html
 class FairLossMetric: public RegressionMetric<FairLossMetric> {
 public:
-  explicit FairLossMetric(const MetricConfig& config) :RegressionMetric<FairLossMetric>(config) {
-    fair_c_ = config.fair_c;
-  }
+  explicit FairLossMetric(const MetricConfig& config) :RegressionMetric<FairLossMetric>(config) {
+    fair_c_ = static_cast<score_t>(config.fair_c);
+  }
-  inline static score_t LossOnPoint(float label, score_t score, float, float c) {
-    const double x = std::abs(score - label);
-    return c * x - c * c * std::log(1.0 + x / c);
-  }
+  inline static score_t LossOnPoint(float label, score_t score, float, float c) {
+    const score_t x = std::fabs(score - label);
+    return c * x - c * c * std::log(1.0f + x / c);
+  }
-  inline static const char* Name() {
-    return "fair";
-  }
+  inline static const char* Name() {
+    return "fair";
+  }
 };
 } // namespace LightGBM
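
The Fair loss evaluated above, with x = |score - label|, is

    $$ L_c(x) = c\,x - c^2 \log\!\left(1 + \frac{x}{c}\right), $$

which behaves quadratically for x much smaller than c and linearly for x much larger than c (see the tutorial link in the code comment). The remaining hunks apply the same double-to-score_t treatment inside the regression objective functions.
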
@@ -23,15 +23,15 @@ public:
   }
   void GetGradients(const score_t* score, score_t* gradients,
-    score_t* hessians) const override {
+    score_t* hessians) const override {
     if (weights_ == nullptr) {
-      #pragma omp parallel for schedule(static)
+      #pragma omp parallel for schedule(static)
       for (data_size_t i = 0; i < num_data_; ++i) {
         gradients[i] = (score[i] - label_[i]);
         hessians[i] = 1.0;
       }
     } else {
-      #pragma omp parallel for schedule(static)
+      #pragma omp parallel for schedule(static)
       for (data_size_t i = 0; i < num_data_; ++i) {
         gradients[i] = (score[i] - label_[i]) * weights_[i];
         hessians[i] = weights_[i];
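
This first objective hunk is the L2 loss (identity hessian; the L1 class follows in the next hunk). Its gradient/hessian pair is just the first and second derivative of the squared error with respect to the score:

    $$ L(s, y) = \tfrac{1}{2}(s - y)^2, \qquad \frac{\partial L}{\partial s} = s - y, \qquad \frac{\partial^2 L}{\partial s^2} = 1, $$

with both scaled by the data weight in the weighted branch.
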
@@ -52,10 +52,12 @@ private:
   const float* weights_;
 };
+/*!
+* \brief L1 regression loss
+*/
 class RegressionL1loss: public ObjectiveFunction {
 public:
-  explicit RegressionL1loss(const ObjectiveConfig& config) {}
+  explicit RegressionL1loss(const ObjectiveConfig&) {}
   ~RegressionL1loss() {}
@@ -66,28 +68,28 @@ public:
   }
   void GetGradients(const score_t* score, score_t* gradients,
-    score_t* hessians) const override {
+    score_t* hessians) const override {
     if (weights_ == nullptr) {
-      #pragma omp parallel for schedule(static)
+      #pragma omp parallel for schedule(static)
       for (data_size_t i = 0; i < num_data_; ++i) {
-        const double diff = score[i] - label_[i];
-        if (diff >= 0.0) {
-          gradients[i] = 1.0;
+        const score_t diff = score[i] - label_[i];
+        if (diff >= 0.0f) {
+          gradients[i] = 1.0f;
         } else {
-          gradients[i] = -1.0;
+          gradients[i] = -1.0f;
         }
-        hessians[i] = Common::ApproximateHessianWithGaussian(score[i], label_[i], gradients[i]);
+        hessians[i] = static_cast<score_t>(Common::ApproximateHessianWithGaussian(score[i], label_[i], gradients[i]));
       }
     } else {
-      #pragma omp parallel for schedule(static)
+      #pragma omp parallel for schedule(static)
       for (data_size_t i = 0; i < num_data_; ++i) {
-        const double diff = score[i] - label_[i];
-        if (diff >= 0.0) {
+        const score_t diff = score[i] - label_[i];
+        if (diff >= 0.0f) {
          gradients[i] = weights_[i];
        } else {
          gradients[i] = -weights_[i];
        }
-        hessians[i] = Common::ApproximateHessianWithGaussian(score[i], label_[i], gradients[i], weights_[i]);
+        hessians[i] = static_cast<score_t>(Common::ApproximateHessianWithGaussian(score[i], label_[i], gradients[i], weights_[i]));
       }
     }
   }
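
The absolute loss behind these branches has gradient sign(s - y) and a second derivative that vanishes almost everywhere:

    $$ \frac{\partial}{\partial s}\,|s - y| = \operatorname{sign}(s - y), \qquad \frac{\partial^2}{\partial s^2}\,|s - y| = 0 \quad (s \ne y), $$

so a literal hessian would break the Newton-style leaf updates. Common::ApproximateHessianWithGaussian substitutes a smooth positive surrogate; its definition lives in LightGBM's common utilities and is not part of this diff.
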
@@ -105,11 +107,13 @@ private:
   /*! \brief Pointer of weights */
   const float* weights_;
 };
+/*!
+* \brief Huber regression loss
+*/
 class RegressionHuberLoss: public ObjectiveFunction {
 public:
   explicit RegressionHuberLoss(const ObjectiveConfig& config) {
-    delta_ = config.huber_delta;
+    delta_ = static_cast<score_t>(config.huber_delta);
   }
   ~RegressionHuberLoss() {
@@ -122,39 +126,39 @@ public:
   }
   void GetGradients(const score_t* score, score_t* gradients,
-    score_t* hessians) const override {
+    score_t* hessians) const override {
     if (weights_ == nullptr) {
-      #pragma omp parallel for schedule(static)
+      #pragma omp parallel for schedule(static)
       for (data_size_t i = 0; i < num_data_; ++i) {
-        const double diff = score[i] - label_[i];
+        const score_t diff = score[i] - label_[i];
         if (std::abs(diff) <= delta_) {
           gradients[i] = diff;
-          hessians[i] = 1.0;
+          hessians[i] = 1.0f;
         } else {
-          if (diff >= 0.0) {
-            gradients[i] = delta_;
-          } else {
-            gradients[i] = -delta_;
-          }
-          hessians[i] = Common::ApproximateHessianWithGaussian(score[i], label_[i], gradients[i]);
+          if (diff >= 0.0f) {
+            gradients[i] = delta_;
+          } else {
+            gradients[i] = -delta_;
+          }
+          hessians[i] = static_cast<score_t>(Common::ApproximateHessianWithGaussian(score[i], label_[i], gradients[i]));
         }
       }
     } else {
-      #pragma omp parallel for schedule(static)
+      #pragma omp parallel for schedule(static)
       for (data_size_t i = 0; i < num_data_; ++i) {
-        const double diff = score[i] - label_[i];
+        const score_t diff = score[i] - label_[i];
         if (std::abs(diff) <= delta_) {
           gradients[i] = diff * weights_[i];
           hessians[i] = weights_[i];
         } else {
-          if (diff >= 0.0) {
-            gradients[i] = delta_ * weights_[i];
-          } else {
-            gradients[i] = -delta_ * weights_[i];
-          }
-          hessians[i] = Common::ApproximateHessianWithGaussian(score[i], label_[i], gradients[i], weights_[i]);
+          if (diff >= 0.0f) {
+            gradients[i] = delta_ * weights_[i];
+          } else {
+            gradients[i] = -delta_ * weights_[i];
+          }
+          hessians[i] = static_cast<score_t>(Common::ApproximateHessianWithGaussian(score[i], label_[i], gradients[i], weights_[i]));
         }
       }
     }
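
These branches implement the Huber derivative: the quadratic region uses the plain L2 gradient with unit hessian, and the linear region clips the gradient at delta and falls back to the same Gaussian hessian surrogate as the L1 loss:

    $$ \frac{\partial L_\delta}{\partial s} = \begin{cases} s - y, & |s - y| \le \delta \\ \delta \operatorname{sign}(s - y), & \text{otherwise.} \end{cases} $$
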
@@ -172,7 +176,7 @@ private:
   /*! \brief Pointer of weights */
   const float* weights_;
   /*! \brief delta for Huber loss */
-  double delta_;
+  score_t delta_;
 };
@@ -180,7 +184,7 @@ private:
 class RegressionFairLoss: public ObjectiveFunction {
 public:
   explicit RegressionFairLoss(const ObjectiveConfig& config) {
-    c_ = config.fair_c;
+    c_ = static_cast<score_t>(config.fair_c);
   }
   ~RegressionFairLoss() {}
@@ -192,18 +196,18 @@ public:
   }
   void GetGradients(const score_t* score, score_t* gradients,
-    score_t* hessians) const override {
+    score_t* hessians) const override {
     if (weights_ == nullptr) {
-      #pragma omp parallel for schedule(static)
+      #pragma omp parallel for schedule(static)
       for (data_size_t i = 0; i < num_data_; ++i) {
-        const double x = score[i] - label_[i];
+        const score_t x = score[i] - label_[i];
         gradients[i] = c_ * x / (std::fabs(x) + c_);
         hessians[i] = c_ * c_ / ((std::fabs(x) + c_) * (std::fabs(x) + c_));
       }
     } else {
-      #pragma omp parallel for schedule(static)
+      #pragma omp parallel for schedule(static)
       for (data_size_t i = 0; i < num_data_; ++i) {
-        const double x = score[i] - label_[i];
+        const score_t x = score[i] - label_[i];
         gradients[i] = c_ * x / (std::fabs(x) + c_);
         gradients[i] *= weights_[i];
         hessians[i] = c_ * c_ / ((std::fabs(x) + c_) * (std::fabs(x) + c_));
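
Differentiating the Fair loss L_c(x) = c|x| - c^2 log(1 + |x|/c) with x = score - label reproduces both assignment lines in the loop:

    $$ \frac{\partial L_c}{\partial x} = \frac{c\,x}{|x| + c}, \qquad \frac{\partial^2 L_c}{\partial x^2} = \frac{c^2}{(|x| + c)^2}. $$

Unlike L1 and Huber, this hessian is strictly positive everywhere, so no Gaussian approximation is needed.
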
@@ -224,7 +228,7 @@ private:
   /*! \brief Pointer of weights */
   const float* weights_;
   /*! \brief c for Fair loss */
-  double c_;
+  score_t c_;
 };
 } // namespace LightGBM