Unverified commit 8ebef94c authored by Guolin Ke, committed by GitHub

quantile objective function & metric (#1043)

* add quantile metric.

* first draft.

* add "sqrt" transform in regression objective function.

* fix a bug.

* update parameter doc

* fix doc
parent 14a6d94f
@@ -63,6 +63,10 @@ Core Parameters
- ``poisson``, `Poisson regression`_
- ``quantile``, `Quantile Regression`_
- ``quantile_l2``, like the ``quantile`` objective, but L2 loss is used instead (see the loss sketch after this list)
- ``binary``, binary classification application
- ``lambdarank``, `lambdarank`_ application
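A hedged, self-contained sketch of the two new losses, hand-derived from the gradient code later in this commit (the helper names are mine, not part of LightGBM):

```cpp
// Pinball (quantile) loss for a target quantile alpha in (0, 1):
// under-predictions cost alpha per unit, over-predictions (1 - alpha).
double QuantileLoss(double label, double score, double alpha) {
  const double delta = label - score;
  return delta >= 0 ? alpha * delta : (alpha - 1.0) * delta;
}

// "quantile_l2": the same asymmetry applied to a squared residual, which is
// what the alpha / (1 - alpha) reweighted gradients below integrate to.
double QuantileL2Loss(double label, double score, double alpha) {
  const double delta = score - label;
  const double w = delta > 0 ? (1.0 - alpha) : alpha;
  return 0.5 * w * delta * delta;
}
```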
@@ -490,9 +494,9 @@ Objective Parameters
- parameter for sigmoid function. Will be used in ``binary`` classification and ``lambdarank``
- ``huber_delta``, default=\ ``1.0``, type=double
- ``alpha``, default=\ ``0.9``, type=double
- parameter for `Huber loss`_. Will be used in ``regression`` task
- parameter for `Huber loss`_ and `Quantile Regression`_. Will be used in ``regression`` task
- ``fair_c``, default=\ ``1.0``, type=double
@@ -540,6 +544,10 @@ Objective Parameters
- only used in ``multiclass`` classification
- ``reg_sqrt``, default=\ ``false``, type=bool
- only used in regression task. Will fit ``sqrt(label)`` instead, and the prediction result will be automatically converted back via ``pow2(prediction)`` (see the transform sketch below)
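A standalone sketch of the ``reg_sqrt`` transform pair; the ``std::copysign`` trick matches the objective code later in this diff, while the helper names are illustrative only:

```cpp
#include <cmath>

// Training-time transform applied by the objective when reg_sqrt=true:
// fit sqrt(|label|), with copysign preserving the label's sign.
double SqrtTransform(double label) {
  return std::copysign(std::sqrt(std::fabs(label)), label);
}

// Prediction-time inverse (the "pow2(prediction)" from the description):
// square the raw score and restore its sign.
double SqrtInverse(double raw_score) {
  return std::copysign(raw_score * raw_score, raw_score);
}
```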
Metric Parameters
-----------------
@@ -552,6 +560,8 @@ Metric Parameters
- ``l2_root``, root squared loss, alias=\ ``root_mean_squared_error``, ``rmse``
- ``quantile``, `Quantile Regression`_ (see the averaging sketch after this list)
- ``huber``, `Huber loss`_
- ``fair``, `Fair loss`_
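Since the regression metrics all share the same weighted-average reduction, here is a hedged standalone sketch of how the new ``quantile`` metric value is computed (mirroring ``RegressionMetric::Eval`` further down in this diff; the helper name and plain ``double`` types are mine):

```cpp
#include <cstddef>
#include <vector>

// Weighted mean of the pointwise pinball loss, the reduction that
// RegressionMetric::Eval performs; empty weights mean "unweighted".
double EvalQuantileMetric(const std::vector<double>& label,
                          const std::vector<double>& score,
                          const std::vector<double>& weights,
                          double alpha) {
  double sum_loss = 0.0, sum_weights = 0.0;
  for (std::size_t i = 0; i < label.size(); ++i) {
    const double delta = label[i] - score[i];
    const double loss = delta >= 0 ? alpha * delta : (alpha - 1.0) * delta;
    const double w = weights.empty() ? 1.0 : weights[i];
    sum_loss += loss * w;
    sum_weights += w;
  }
  return sum_loss / sum_weights;
}
```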
@@ -715,6 +725,8 @@ You can specify query/group id in data file now. Please refer to parameter ``gr
.. _Huber loss: https://en.wikipedia.org/wiki/Huber_loss
.. _Quantile Regression: https://en.wikipedia.org/wiki/Quantile_regression
.. _Fair loss: https://www.kaggle.com/c/allstate-claims-severity/discussion/24520
.. _Poisson regression: https://en.wikipedia.org/wiki/Poisson_regression
@@ -164,7 +164,6 @@ struct ObjectiveConfig: public ConfigBase {
public:
virtual ~ObjectiveConfig() {}
double sigmoid = 1.0f;
double huber_delta = 1.0f;
double fair_c = 1.0f;
// for Approximate Hessian With Gaussian
double gaussian_eta = 1.0f;
@@ -179,6 +178,9 @@ public:
int num_class = 1;
// Balancing of positive and negative weights
double scale_pos_weight = 1.0f;
// if true, fit sqrt(label) instead of the raw label
bool reg_sqrt = false;
double alpha = 0.9f;
LIGHTGBM_EXPORT void Set(const std::unordered_map<std::string, std::string>& params) override;
};
@@ -188,8 +190,8 @@ public:
virtual ~MetricConfig() {}
int num_class = 1;
double sigmoid = 1.0f;
double huber_delta = 1.0f;
double fair_c = 1.0f;
double alpha = 0.9f;
std::vector<double> label_gain;
std::vector<int> eval_at;
LIGHTGBM_EXPORT void Set(const std::unordered_map<std::string, std::string>& params) override;
@@ -463,7 +465,7 @@ struct ParameterAlias {
"xgboost_dart_mode", "drop_seed", "top_rate", "other_rate",
"min_data_in_bin", "data_random_seed", "bin_construct_sample_cnt",
"num_iteration_predict", "pred_early_stop", "pred_early_stop_freq",
"pred_early_stop_margin", "use_missing", "sigmoid", "huber_delta",
"pred_early_stop_margin", "use_missing", "sigmoid",
"fair_c", "poission_max_delta_step", "scale_pos_weight",
"boost_from_average", "max_position", "label_gain",
"metric", "metric_freq", "time_out",
@@ -474,7 +476,8 @@ struct ParameterAlias {
"max_conflict_rate", "poisson_max_delta_step", "gaussian_eta",
"histogram_pool_size", "output_freq", "is_provide_training_metric", "machine_list_filename", "machines",
"zero_as_missing", "init_score_file", "valid_init_score_file", "is_predict_contrib",
"max_cat_threshold", "cat_smooth", "min_data_per_group", "cat_l2", "max_cat_to_onehot"
"max_cat_threshold", "cat_smooth", "min_data_per_group", "cat_l2", "max_cat_to_onehot",
"alpha", "reg_sqrt"
});
std::unordered_map<std::string, std::string> tmp_map;
for (const auto& pair : *params) {
@@ -303,7 +303,6 @@ void IOConfig::Set(const std::unordered_map<std::string, std::string>& params) {
void ObjectiveConfig::Set(const std::unordered_map<std::string, std::string>& params) {
GetBool(params, "is_unbalance", &is_unbalance);
GetDouble(params, "sigmoid", &sigmoid);
GetDouble(params, "huber_delta", &huber_delta);
GetDouble(params, "fair_c", &fair_c);
GetDouble(params, "gaussian_eta", &gaussian_eta);
GetDouble(params, "poisson_max_delta_step", &poisson_max_delta_step);
@@ -312,6 +311,8 @@ void ObjectiveConfig::Set(const std::unordered_map<std::string, std::string>& pa
GetInt(params, "num_class", &num_class);
CHECK(num_class >= 1);
GetDouble(params, "scale_pos_weight", &scale_pos_weight);
GetDouble(params, "alpha", &alpha);
GetBool(params, "reg_sqrt", &reg_sqrt);
std::string tmp_str = "";
if (GetString(params, "label_gain", &tmp_str)) {
label_gain = Common::StringToArray<double>(tmp_str, ',');
@@ -329,9 +330,9 @@ void ObjectiveConfig::Set(const std::unordered_map<std::string, std::string>& pa
void MetricConfig::Set(const std::unordered_map<std::string, std::string>& params) {
GetDouble(params, "sigmoid", &sigmoid);
GetDouble(params, "huber_delta", &huber_delta);
GetDouble(params, "fair_c", &fair_c);
GetInt(params, "num_class", &num_class);
GetDouble(params, "alpha", &alpha);
std::string tmp_str = "";
if (GetString(params, "label_gain", &tmp_str)) {
label_gain = Common::StringToArray<double>(tmp_str, ',');
@@ -15,6 +15,8 @@ Metric* Metric::CreateMetric(const std::string& type, const MetricConfig& config
return new RMSEMetric(config);
} else if (type == std::string("l1") || type == std::string("mean_absolute_error") || type == std::string("mae")) {
return new L1Metric(config);
} else if (type == std::string("quantile")) {
return new QuantileMetric(config);
} else if (type == std::string("huber")) {
return new HuberLossMetric(config);
} else if (type == std::string("fair")) {
@@ -15,7 +15,7 @@ namespace LightGBM {
template<typename PointWiseLossCalculator>
class RegressionMetric: public Metric {
public:
explicit RegressionMetric(const MetricConfig&) :huber_delta_(1.0f), fair_c_(1.0f) {
explicit RegressionMetric(const MetricConfig& config) :config_(config) {
}
virtual ~RegressionMetric() {
@@ -32,7 +32,6 @@ public:
void Init(const Metadata& metadata, data_size_t num_data) override {
name_.emplace_back(PointWiseLossCalculator::Name());
num_data_ = num_data;
// get label
label_ = metadata.label();
@@ -55,13 +54,13 @@ public:
#pragma omp parallel for schedule(static) reduction(+:sum_loss)
for (data_size_t i = 0; i < num_data_; ++i) {
// add loss
sum_loss += PointWiseLossCalculator::LossOnPoint(label_[i], score[i], huber_delta_, fair_c_);
sum_loss += PointWiseLossCalculator::LossOnPoint(label_[i], score[i], config_);
}
} else {
#pragma omp parallel for schedule(static) reduction(+:sum_loss)
for (data_size_t i = 0; i < num_data_; ++i) {
// add loss
sum_loss += PointWiseLossCalculator::LossOnPoint(label_[i], score[i], huber_delta_, fair_c_) * weights_[i];
sum_loss += PointWiseLossCalculator::LossOnPoint(label_[i], score[i], config_) * weights_[i];
}
}
} else {
@@ -71,7 +70,7 @@ public:
// add loss
double t = 0;
objective->ConvertOutput(&score[i], &t);
sum_loss += PointWiseLossCalculator::LossOnPoint(label_[i], t, huber_delta_, fair_c_);
sum_loss += PointWiseLossCalculator::LossOnPoint(label_[i], t, config_);
}
} else {
#pragma omp parallel for schedule(static) reduction(+:sum_loss)
@@ -79,7 +78,7 @@ public:
// add loss
double t = 0;
objective->ConvertOutput(&score[i], &t);
sum_loss += PointWiseLossCalculator::LossOnPoint(label_[i], t, huber_delta_, fair_c_) * weights_[i];
sum_loss += PointWiseLossCalculator::LossOnPoint(label_[i], t, config_) * weights_[i];
}
}
}
@@ -91,13 +90,6 @@ public:
inline static double AverageLoss(double sum_loss, double sum_weights) {
return sum_loss / sum_weights;
}
protected:
/*! \brief delta for Huber loss */
double huber_delta_;
/*! \brief c for Fair loss */
double fair_c_;
private:
/*! \brief Number of data */
data_size_t num_data_;
@@ -108,6 +100,7 @@ private:
/*! \brief Sum weights */
double sum_weights_;
/*! \brief Config parameters for this metric */
MetricConfig config_;
/*! \brief Name of this test set */
std::vector<std::string> name_;
};
@@ -116,7 +109,7 @@ class RMSEMetric: public RegressionMetric<RMSEMetric> {
public:
explicit RMSEMetric(const MetricConfig& config) :RegressionMetric<RMSEMetric>(config) {}
inline static double LossOnPoint(float label, double score, double, double) {
inline static double LossOnPoint(float label, double score, const MetricConfig&) {
return (score - label)*(score - label);
}
@@ -135,26 +128,42 @@ class L2Metric: public RegressionMetric<L2Metric> {
public:
explicit L2Metric(const MetricConfig& config) :RegressionMetric<L2Metric>(config) {}
inline static double LossOnPoint(float label, double score, double, double) {
inline static double LossOnPoint(float label, double score, const MetricConfig&) {
return (score - label)*(score - label);
}
inline static double AverageLoss(double sum_loss, double sum_weights) {
// need mean of the result for L2 loss
return sum_loss / sum_weights;
inline static const char* Name() {
return "l2";
}
};
/*! \brief Quantile loss for regression task */
class QuantileMetric : public RegressionMetric<QuantileMetric> {
public:
explicit QuantileMetric(const MetricConfig& config) :RegressionMetric<QuantileMetric>(config) {
}
inline static double LossOnPoint(float label, double score, const MetricConfig& config) {
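// pinball loss: under-predictions cost alpha per unit, over-predictions (1 - alpha)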
double delta = label - score;
if (delta < 0) {
return (config.alpha - 1.0f) * delta;
} else {
return config.alpha * delta;
}
}
inline static const char* Name() {
return "l2";
return "quantile";
}
};
/*! \brief L1 loss for regression task */
class L1Metric: public RegressionMetric<L1Metric> {
public:
explicit L1Metric(const MetricConfig& config) :RegressionMetric<L1Metric>(config) {}
inline static double LossOnPoint(float label, double score, double, double) {
inline static double LossOnPoint(float label, double score, const MetricConfig&) {
return std::fabs(score - label);
}
inline static const char* Name() {
@@ -166,15 +175,14 @@ public:
class HuberLossMetric: public RegressionMetric<HuberLossMetric> {
public:
explicit HuberLossMetric(const MetricConfig& config) :RegressionMetric<HuberLossMetric>(config) {
huber_delta_ = static_cast<double>(config.huber_delta);
}
inline static double LossOnPoint(float label, double score, double delta, double) {
inline static double LossOnPoint(float label, double score, const MetricConfig& config) {
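// note: config.alpha replaces the removed huber_delta as the Huber threshold in this commit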
const double diff = score - label;
if (std::abs(diff) <= delta) {
if (std::abs(diff) <= config.alpha) {
return 0.5f * diff * diff;
} else {
return delta * (std::abs(diff) - 0.5f * delta);
return config.alpha * (std::abs(diff) - 0.5f * config.alpha);
}
}
@@ -188,11 +196,11 @@ public:
class FairLossMetric: public RegressionMetric<FairLossMetric> {
public:
explicit FairLossMetric(const MetricConfig& config) :RegressionMetric<FairLossMetric>(config) {
fair_c_ = static_cast<double>(config.fair_c);
}
inline static double LossOnPoint(float label, double score, double, double c) {
inline static double LossOnPoint(float label, double score, const MetricConfig& config) {
const double x = std::fabs(score - label);
const double c = config.fair_c;
return c * x - c * c * std::log(1.0f + x / c);
}
@@ -207,7 +215,7 @@ public:
explicit PoissonMetric(const MetricConfig& config) :RegressionMetric<PoissonMetric>(config) {
}
inline static double LossOnPoint(float label, double score, double, double) {
inline static double LossOnPoint(float label, double score, const MetricConfig&) {
const double eps = 1e-10f;
if (score < eps) {
score = eps;
@@ -13,6 +13,10 @@ ObjectiveFunction* ObjectiveFunction::CreateObjectiveFunction(const std::string&
return new RegressionL2loss(config);
} else if (type == std::string("regression_l1") || type == std::string("mean_absolute_error") || type == std::string("mae")) {
return new RegressionL1loss(config);
} else if (type == std::string("quantile")) {
return new RegressionQuantileloss(config);
} else if (type == std::string("quantile_l2")) {
return new RegressionQuantileL2loss(config);
} else if (type == std::string("huber")) {
return new RegressionHuberLoss(config);
} else if (type == std::string("fair")) {
@@ -42,6 +46,10 @@ ObjectiveFunction* ObjectiveFunction::CreateObjectiveFunction(const std::string&
return new RegressionL2loss(strs);
} else if (type == std::string("regression_l1")) {
return new RegressionL1loss(strs);
} else if (type == std::string("quantile")) {
return new RegressionQuantileloss(strs);
} else if (type == std::string("quantile_l2")) {
return new RegressionQuantileL2loss(strs);
} else if (type == std::string("huber")) {
return new RegressionHuberLoss(strs);
} else if (type == std::string("fair")) {
@@ -7,16 +7,23 @@
#include <LightGBM/utils/common.h>
namespace LightGBM {
/*!
* \brief Objective function for regression
*/
class RegressionL2loss: public ObjectiveFunction {
public:
explicit RegressionL2loss(const ObjectiveConfig&) {
explicit RegressionL2loss(const ObjectiveConfig& config) {
sqrt_ = config.reg_sqrt;
}
explicit RegressionL2loss(const std::vector<std::string>&) {
explicit RegressionL2loss(const std::vector<std::string>& strs) {
sqrt_ = false;
for (auto str : strs) {
if (str == std::string("sqrt")) {
sqrt_ = true;
}
}
}
~RegressionL2loss() {
@@ -25,6 +32,13 @@ public:
void Init(const Metadata& metadata, data_size_t num_data) override {
num_data_ = num_data;
label_ = metadata.label();
if (sqrt_) {
trans_label_.resize(num_data_);
for (data_size_t i = 0; i < num_data; ++i) {
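// signed sqrt: copysign preserves the label's sign so ConvertOutput can invert it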
trans_label_[i] = std::copysign(std::sqrt(std::fabs(label_[i])), label_[i]);
}
label_ = trans_label_.data();
}
weights_ = metadata.weights();
}
@@ -49,9 +63,20 @@ public:
return "regression";
}
void ConvertOutput(const double* input, double* output) const override {
if (sqrt_) {
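// undo the training-time sqrt transform: square the raw score, keeping its sign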
output[0] = std::copysign(input[0] * input[0], input[0]);
} else {
output[0] = input[0];
}
}
std::string ToString() const override {
std::stringstream str_buf;
str_buf << GetName();
if (sqrt_) {
str_buf << " sqrt";
}
return str_buf.str();
}
@@ -63,38 +88,40 @@ public:
}
}
bool BoostFromAverage() const override { return true; }
bool BoostFromAverage() const override {
if (sqrt_) {
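// the average of sqrt-transformed labels is not a useful initial score on the original scale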
return false;
} else {
return true;
}
}
private:
protected:
bool sqrt_;
/*! \brief Number of data */
data_size_t num_data_;
/*! \brief Pointer of label */
const float* label_;
/*! \brief Pointer of weights */
const float* weights_;
std::vector<float> trans_label_;
};
/*!
* \brief L1 regression loss
*/
class RegressionL1loss: public ObjectiveFunction {
class RegressionL1loss: public RegressionL2loss {
public:
explicit RegressionL1loss(const ObjectiveConfig& config) {
explicit RegressionL1loss(const ObjectiveConfig& config): RegressionL2loss(config) {
eta_ = static_cast<double>(config.gaussian_eta);
}
explicit RegressionL1loss(const std::vector<std::string>&) {
explicit RegressionL1loss(const std::vector<std::string>& strs): RegressionL2loss(strs) {
}
~RegressionL1loss() {}
void Init(const Metadata& metadata, data_size_t num_data) override {
num_data_ = num_data;
label_ = metadata.label();
weights_ = metadata.weights();
}
void GetGradients(const double* score, score_t* gradients,
score_t* hessians) const override {
if (weights_ == nullptr) {
@@ -126,48 +153,31 @@ public:
return "regression_l1";
}
std::string ToString() const override {
std::stringstream str_buf;
str_buf << GetName();
return str_buf.str();
bool IsConstantHessian() const override {
return false;
}
bool BoostFromAverage() const override { return true; }
private:
/*! \brief Number of data */
data_size_t num_data_;
/*! \brief Pointer of label */
const float* label_;
/*! \brief Pointer of weights */
const float* weights_;
/*! \brief a parameter to control the width of Gaussian function to approximate hessian */
double eta_;
};
/*!
* \brief Huber regression loss
*/
class RegressionHuberLoss: public ObjectiveFunction {
class RegressionHuberLoss: public RegressionL2loss {
public:
explicit RegressionHuberLoss(const ObjectiveConfig& config) {
delta_ = static_cast<double>(config.huber_delta);
explicit RegressionHuberLoss(const ObjectiveConfig& config): RegressionL2loss(config) {
alpha_ = static_cast<double>(config.alpha);
eta_ = static_cast<double>(config.gaussian_eta);
}
explicit RegressionHuberLoss(const std::vector<std::string>&) {
explicit RegressionHuberLoss(const std::vector<std::string>& strs): RegressionL2loss(strs) {
}
~RegressionHuberLoss() {
}
void Init(const Metadata& metadata, data_size_t num_data) override {
num_data_ = num_data;
label_ = metadata.label();
weights_ = metadata.weights();
}
void GetGradients(const double* score, score_t* gradients,
score_t* hessians) const override {
if (weights_ == nullptr) {
@@ -175,14 +185,14 @@ public:
for (data_size_t i = 0; i < num_data_; ++i) {
const double diff = score[i] - label_[i];
if (std::abs(diff) <= delta_) {
if (std::abs(diff) <= alpha_) {
gradients[i] = static_cast<score_t>(diff);
hessians[i] = 1.0f;
} else {
if (diff >= 0.0f) {
gradients[i] = static_cast<score_t>(delta_);
gradients[i] = static_cast<score_t>(alpha_);
} else {
gradients[i] = static_cast<score_t>(-delta_);
gradients[i] = static_cast<score_t>(-alpha_);
}
hessians[i] = static_cast<score_t>(Common::ApproximateHessianWithGaussian(score[i], label_[i], gradients[i], eta_));
}
@@ -192,14 +202,14 @@ public:
for (data_size_t i = 0; i < num_data_; ++i) {
const double diff = score[i] - label_[i];
if (std::abs(diff) <= delta_) {
if (std::abs(diff) <= alpha_) {
gradients[i] = static_cast<score_t>(diff * weights_[i]);
hessians[i] = weights_[i];
} else {
if (diff >= 0.0f) {
gradients[i] = static_cast<score_t>(delta_ * weights_[i]);
gradients[i] = static_cast<score_t>(alpha_ * weights_[i]);
} else {
gradients[i] = static_cast<score_t>(-delta_ * weights_[i]);
gradients[i] = static_cast<score_t>(-alpha_ * weights_[i]);
}
hessians[i] = static_cast<score_t>(Common::ApproximateHessianWithGaussian(score[i], label_[i], gradients[i], eta_, weights_[i]));
}
@@ -211,47 +221,31 @@ public:
return "huber";
}
std::string ToString() const override {
std::stringstream str_buf;
str_buf << GetName();
return str_buf.str();
bool IsConstantHessian() const override {
return false;
}
bool BoostFromAverage() const override { return true; }
private:
/*! \brief Number of data */
data_size_t num_data_;
/*! \brief Pointer of label */
const float* label_;
/*! \brief Pointer of weights */
const float* weights_;
/*! \brief delta for Huber loss */
double delta_;
double alpha_;
/*! \brief a parameter to control the width of Gaussian function to approximate hessian */
double eta_;
};
// http://research.microsoft.com/en-us/um/people/zhang/INRIA/Publis/Tutorial-Estim/node24.html
class RegressionFairLoss: public ObjectiveFunction {
class RegressionFairLoss: public RegressionL2loss {
public:
explicit RegressionFairLoss(const ObjectiveConfig& config) {
explicit RegressionFairLoss(const ObjectiveConfig& config): RegressionL2loss(config) {
c_ = static_cast<double>(config.fair_c);
}
explicit RegressionFairLoss(const std::vector<std::string>&) {
explicit RegressionFairLoss(const std::vector<std::string>& strs): RegressionL2loss(strs) {
}
~RegressionFairLoss() {}
void Init(const Metadata& metadata, data_size_t num_data) override {
num_data_ = num_data;
label_ = metadata.label();
weights_ = metadata.weights();
}
void GetGradients(const double* score, score_t* gradients,
score_t* hessians) const override {
if (weights_ == nullptr) {
@@ -275,21 +269,11 @@ public:
return "fair";
}
std::string ToString() const override {
std::stringstream str_buf;
str_buf << GetName();
return str_buf.str();
bool IsConstantHessian() const override {
return false;
}
bool BoostFromAverage() const override { return true; }
private:
/*! \brief Number of data */
data_size_t num_data_;
/*! \brief Pointer of label */
const float* label_;
/*! \brief Pointer of weights */
const float* weights_;
/*! \brief c for Fair loss */
double c_;
};
@@ -298,23 +282,20 @@ private:
/*!
* \brief Objective function for Poisson regression
*/
class RegressionPoissonLoss: public ObjectiveFunction {
class RegressionPoissonLoss: public RegressionL2loss {
public:
explicit RegressionPoissonLoss(const ObjectiveConfig& config) {
explicit RegressionPoissonLoss(const ObjectiveConfig& config): RegressionL2loss(config) {
max_delta_step_ = static_cast<double>(config.poisson_max_delta_step);
}
explicit RegressionPoissonLoss(const std::vector<std::string>&) {
explicit RegressionPoissonLoss(const std::vector<std::string>& strs): RegressionL2loss(strs) {
}
~RegressionPoissonLoss() {}
void Init(const Metadata& metadata, data_size_t num_data) override {
num_data_ = num_data;
label_ = metadata.label();
weights_ = metadata.weights();
RegressionL2loss::Init(metadata, num_data);
// Safety check of labels
float miny;
double sumy;
@@ -356,6 +337,7 @@ public:
}
void ConvertOutput(const double* input, double* output) const override {
RegressionL2loss::ConvertOutput(input, output);
output[0] = std::exp(input[0]);
}
@@ -363,14 +345,6 @@ public:
return "poisson";
}
std::string ToString() const override {
std::stringstream str_buf;
str_buf << GetName();
return str_buf.str();
}
bool BoostFromAverage() const override { return true; }
bool GetCustomAverage(double *initscore) const override {
if (initscore == nullptr) return false;
double sumw = 0.0f;
@@ -392,16 +366,115 @@ public:
return true;
}
bool IsConstantHessian() const override {
return false;
}
private:
/*! \brief Number of data */
data_size_t num_data_;
/*! \brief Pointer of label */
const float* label_;
/*! \brief Pointer of weights */
const float* weights_;
/*! \brief used to safeguard optimization */
double max_delta_step_;
};
class RegressionQuantileloss : public RegressionL2loss {
public:
explicit RegressionQuantileloss(const ObjectiveConfig& config): RegressionL2loss(config) {
alpha_ = static_cast<score_t>(config.alpha);
}
explicit RegressionQuantileloss(const std::vector<std::string>& strs): RegressionL2loss(strs) {
}
~RegressionQuantileloss() {}
void GetGradients(const double* score, score_t* gradients,
score_t* hessians) const override {
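// subgradient of the pinball loss: (1 - alpha) above the target quantile, -alpha below;
// the true second derivative is zero, so a unit Hessian is used as a surrogate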
if (weights_ == nullptr) {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data_; ++i) {
score_t delta = static_cast<score_t>(score[i] - label_[i]);
if (delta >= 0) {
gradients[i] = (1.0f - alpha_);
} else {
gradients[i] = -alpha_;
}
hessians[i] = 1.0f;
}
} else {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data_; ++i) {
score_t delta = static_cast<score_t>(score[i] - label_[i]);
if (delta >= 0) {
gradients[i] = (1.0f - alpha_) * weights_[i];
} else {
gradients[i] = -alpha_ * weights_[i];
}
hessians[i] = weights_[i];
}
}
}
const char* GetName() const override {
return "quantile";
}
private:
score_t alpha_;
};
class RegressionQuantileL2loss : public RegressionL2loss {
public:
explicit RegressionQuantileL2loss(const ObjectiveConfig& config) : RegressionL2loss(config) {
alpha_ = static_cast<score_t>(config.alpha);
}
explicit RegressionQuantileL2loss(const std::vector<std::string>& strs) : RegressionL2loss(strs) {
}
~RegressionQuantileL2loss() {}
void GetGradients(const double* score, score_t* gradients,
score_t* hessians) const override {
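// smoothed variant: residuals are reweighted by (1 - alpha) or alpha, giving an
// asymmetric L2 loss with non-degenerate Hessians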
if (weights_ == nullptr) {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data_; ++i) {
score_t delta = static_cast<score_t>(score[i] - label_[i]);
if (delta > 0) {
gradients[i] = (1.0f - alpha_) * delta;
hessians[i] = (1.0f - alpha_);
} else {
gradients[i] = alpha_ * delta;
hessians[i] = alpha_;
}
}
} else {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data_; ++i) {
score_t delta = static_cast<score_t>(score[i] - label_[i]);
if (delta > 0) {
gradients[i] = (1.0f - alpha_) * delta * weights_[i];
hessians[i] = (1.0f - alpha_) * weights_[i];
} else {
gradients[i] = alpha_ * delta * weights_[i];
hessians[i] = alpha_ * weights_[i];
}
}
}
}
bool IsConstantHessian() const override {
return false;
}
const char* GetName() const override {
return "quantile_l2";
}
private:
score_t alpha_;
};
} // namespace LightGBM
#endif // LightGBM_OBJECTIVE_REGRESSION_OBJECTIVE_HPP_
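As a closing sanity check, here is a hedged, self-contained re-implementation of the unweighted quantile subgradient above (plain ``double`` stands in for ``score_t``; the harness is illustrative, not part of LightGBM's test suite):

```cpp
#include <cassert>
#include <cstddef>
#include <cstdio>
#include <vector>

// Standalone re-implementation of the unweighted branch of
// RegressionQuantileloss::GetGradients.
void QuantileGradients(const std::vector<double>& score,
                       const std::vector<double>& label,
                       double alpha,
                       std::vector<double>* gradients) {
  gradients->resize(score.size());
  for (std::size_t i = 0; i < score.size(); ++i) {
    const double delta = score[i] - label[i];
    // subgradient of the pinball loss
    (*gradients)[i] = (delta >= 0) ? (1.0 - alpha) : -alpha;
  }
}

int main() {
  const double alpha = 0.9;
  const std::vector<double> score = {1.0, 1.0};
  const std::vector<double> label = {0.5, 2.0};  // one over-, one under-prediction
  std::vector<double> grad;
  QuantileGradients(score, label, alpha, &grad);
  assert(grad[0] > 0.0);  // over-prediction: gradient = 1 - alpha = 0.1
  assert(grad[1] < 0.0);  // under-prediction: gradient = -alpha = -0.9
  std::printf("gradients = [%g, %g]\n", grad[0], grad[1]);
  return 0;
}
```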