/*! \file regression_objective.hpp */
#ifndef LIGHTGBM_OBJECTIVE_REGRESSION_OBJECTIVE_HPP_
#define LIGHTGBM_OBJECTIVE_REGRESSION_OBJECTIVE_HPP_

#include <LightGBM/objective_function.h>
#include <LightGBM/utils/common.h>

namespace LightGBM {
/*!
* \brief Objective function for L2 regression
*/
class RegressionL2loss: public ObjectiveFunction {
public:
  explicit RegressionL2loss(const ObjectiveConfig&) {
  }

  ~RegressionL2loss() {
  }

  void Init(const Metadata& metadata, data_size_t num_data) override {
    num_data_ = num_data;
    label_ = metadata.label();
    weights_ = metadata.weights();
  }

  void GetGradients(const score_t* score, score_t* gradients,
Guolin Ke's avatar
Guolin Ke committed
26
    score_t* hessians) const override {
Guolin Ke's avatar
Guolin Ke committed
27
    if (weights_ == nullptr) {
Guolin Ke's avatar
Guolin Ke committed
28
#pragma omp parallel for schedule(static)
Guolin Ke's avatar
Guolin Ke committed
29
30
31
32
33
      for (data_size_t i = 0; i < num_data_; ++i) {
        gradients[i] = (score[i] - label_[i]);
        hessians[i] = 1.0;
      }
    } else {
Guolin Ke's avatar
Guolin Ke committed
34
#pragma omp parallel for schedule(static)
Guolin Ke's avatar
Guolin Ke committed
35
36
37
38
39
40
41
      for (data_size_t i = 0; i < num_data_; ++i) {
        gradients[i] = (score[i] - label_[i]) * weights_[i];
        hessians[i] = weights_[i];
      }
    }
  }

Guolin Ke's avatar
Guolin Ke committed
42
43
  const char* GetName() const override {
    return "regression";
Guolin Ke's avatar
Guolin Ke committed
44
45
46
47
48
49
50
51
52
53
54
  }

private:
  /*! \brief Number of data */
  data_size_t num_data_;
  /*! \brief Pointer of label */
  const float* label_;
  /*! \brief Pointer of weights */
  const float* weights_;
};

/*!
* \brief L1 regression loss
*/
class RegressionL1loss: public ObjectiveFunction {
public:
Guolin Ke's avatar
Guolin Ke committed
60
  explicit RegressionL1loss(const ObjectiveConfig&) {}
61
62
63
64
65
66
67
68
69
70

  ~RegressionL1loss() {}

  void Init(const Metadata& metadata, data_size_t num_data) override {
    num_data_ = num_data;
    label_ = metadata.label();
    weights_ = metadata.weights();
  }

  void GetGradients(const score_t* score, score_t* gradients,
Guolin Ke's avatar
Guolin Ke committed
71
    score_t* hessians) const override {
72
    if (weights_ == nullptr) {
Guolin Ke's avatar
Guolin Ke committed
73
#pragma omp parallel for schedule(static)
74
      for (data_size_t i = 0; i < num_data_; ++i) {
Guolin Ke's avatar
Guolin Ke committed
75
76
77
        const score_t diff = score[i] - label_[i];
        if (diff >= 0.0f) {
          gradients[i] = 1.0f;
78
        } else {
Guolin Ke's avatar
Guolin Ke committed
79
          gradients[i] = -1.0f;
80
        }
Guolin Ke's avatar
Guolin Ke committed
81
        hessians[i] = static_cast<score_t>(Common::ApproximateHessianWithGaussian(score[i], label_[i], gradients[i]));
82
83
      }
    } else {
Guolin Ke's avatar
Guolin Ke committed
84
#pragma omp parallel for schedule(static)
85
      for (data_size_t i = 0; i < num_data_; ++i) {
Guolin Ke's avatar
Guolin Ke committed
86
87
        const score_t diff = score[i] - label_[i];
        if (diff >= 0.0f) {
88
89
90
91
          gradients[i] = weights_[i];
        } else {
          gradients[i] = -weights_[i];
        }
Guolin Ke's avatar
Guolin Ke committed
92
        hessians[i] = static_cast<score_t>(Common::ApproximateHessianWithGaussian(score[i], label_[i], gradients[i], weights_[i]));
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
      }
    }
  }

  const char* GetName() const override {
    return "regression_l1";
  }

private:
  /*! \brief Number of data */
  data_size_t num_data_;
  /*! \brief Pointer of label */
  const float* label_;
  /*! \brief Pointer of weights */
  const float* weights_;
};

/*!
* \brief Huber regression loss
*/
class RegressionHuberLoss: public ObjectiveFunction {
Tsukasa OMOTO's avatar
Tsukasa OMOTO committed
114
public:
Tsukasa OMOTO's avatar
Tsukasa OMOTO committed
115
  explicit RegressionHuberLoss(const ObjectiveConfig& config) {
Guolin Ke's avatar
Guolin Ke committed
116
    delta_ = static_cast<score_t>(config.huber_delta);
Tsukasa OMOTO's avatar
Tsukasa OMOTO committed
117
118
  }

Tsukasa OMOTO's avatar
Tsukasa OMOTO committed
119
  ~RegressionHuberLoss() {
Tsukasa OMOTO's avatar
Tsukasa OMOTO committed
120
121
122
123
124
125
126
127
128
  }

  void Init(const Metadata& metadata, data_size_t num_data) override {
    num_data_ = num_data;
    label_ = metadata.label();
    weights_ = metadata.weights();
  }

  void GetGradients(const score_t* score, score_t* gradients,
Guolin Ke's avatar
Guolin Ke committed
129
    score_t* hessians) const override {
Tsukasa OMOTO's avatar
Tsukasa OMOTO committed
130
    if (weights_ == nullptr) {
Guolin Ke's avatar
Guolin Ke committed
131
#pragma omp parallel for schedule(static)
Tsukasa OMOTO's avatar
Tsukasa OMOTO committed
132
      for (data_size_t i = 0; i < num_data_; ++i) {
Guolin Ke's avatar
Guolin Ke committed
133
        const score_t diff = score[i] - label_[i];
Tsukasa OMOTO's avatar
Tsukasa OMOTO committed
134

Tsukasa OMOTO's avatar
Tsukasa OMOTO committed
135
136
        if (std::abs(diff) <= delta_) {
          gradients[i] = diff;
Guolin Ke's avatar
Guolin Ke committed
137
          hessians[i] = 1.0f;
Tsukasa OMOTO's avatar
Tsukasa OMOTO committed
138
        } else {
Guolin Ke's avatar
Guolin Ke committed
139
140
141
142
143
144
          if (diff >= 0.0f) {
            gradients[i] = delta_;
          } else {
            gradients[i] = -delta_;
          }
          hessians[i] = static_cast<score_t>(Common::ApproximateHessianWithGaussian(score[i], label_[i], gradients[i]));
Tsukasa OMOTO's avatar
Tsukasa OMOTO committed
145
146
147
        }
      }
    } else {
Guolin Ke's avatar
Guolin Ke committed
148
#pragma omp parallel for schedule(static)
Tsukasa OMOTO's avatar
Tsukasa OMOTO committed
149
      for (data_size_t i = 0; i < num_data_; ++i) {
Guolin Ke's avatar
Guolin Ke committed
150
        const score_t diff = score[i] - label_[i];
Tsukasa OMOTO's avatar
Tsukasa OMOTO committed
151

Tsukasa OMOTO's avatar
Tsukasa OMOTO committed
152
153
154
155
        if (std::abs(diff) <= delta_) {
          gradients[i] = diff * weights_[i];
          hessians[i] = weights_[i];
        } else {
Guolin Ke's avatar
Guolin Ke committed
156
157
158
159
160
161
          if (diff >= 0.0f) {
            gradients[i] = delta_ * weights_[i];
          } else {
            gradients[i] = -delta_ * weights_[i];
          }
          hessians[i] = static_cast<score_t>(Common::ApproximateHessianWithGaussian(score[i], label_[i], gradients[i], weights_[i]));
Tsukasa OMOTO's avatar
Tsukasa OMOTO committed
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
        }
      }
    }
  }

  const char* GetName() const override {
    return "huber";
  }

private:
  /*! \brief Number of data */
  data_size_t num_data_;
  /*! \brief Pointer of label */
  const float* label_;
  /*! \brief Pointer of weights */
  const float* weights_;
  /*! \brief delta for Huber loss */
Guolin Ke's avatar
Guolin Ke committed
179
  score_t delta_;
Tsukasa OMOTO's avatar
Tsukasa OMOTO committed
180
181
};

// http://research.microsoft.com/en-us/um/people/zhang/INRIA/Publis/Tutorial-Estim/node24.html
class RegressionFairLoss: public ObjectiveFunction {
public:
  explicit RegressionFairLoss(const ObjectiveConfig& config) {
Guolin Ke's avatar
Guolin Ke committed
187
    c_ = static_cast<score_t>(config.fair_c);
Tsukasa OMOTO's avatar
Tsukasa OMOTO committed
188
189
190
191
192
193
194
195
196
197
198
  }

  ~RegressionFairLoss() {}

  void Init(const Metadata& metadata, data_size_t num_data) override {
    num_data_ = num_data;
    label_ = metadata.label();
    weights_ = metadata.weights();
  }

  void GetGradients(const score_t* score, score_t* gradients,
Guolin Ke's avatar
Guolin Ke committed
199
    score_t* hessians) const override {
Tsukasa OMOTO's avatar
Tsukasa OMOTO committed
200
    if (weights_ == nullptr) {
Guolin Ke's avatar
Guolin Ke committed
201
#pragma omp parallel for schedule(static)
Tsukasa OMOTO's avatar
Tsukasa OMOTO committed
202
      for (data_size_t i = 0; i < num_data_; ++i) {
Guolin Ke's avatar
Guolin Ke committed
203
        const score_t x = score[i] - label_[i];
Tsukasa OMOTO's avatar
Tsukasa OMOTO committed
204
205
206
207
        gradients[i] = c_ * x / (std::fabs(x) + c_);
        hessians[i] = c_ * c_ / ((std::fabs(x) + c_) * (std::fabs(x) + c_));
      }
    } else {
Guolin Ke's avatar
Guolin Ke committed
208
#pragma omp parallel for schedule(static)
Tsukasa OMOTO's avatar
Tsukasa OMOTO committed
209
      for (data_size_t i = 0; i < num_data_; ++i) {
Guolin Ke's avatar
Guolin Ke committed
210
        const score_t x = score[i] - label_[i];
Tsukasa OMOTO's avatar
Tsukasa OMOTO committed
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
        gradients[i] = c_ * x / (std::fabs(x) + c_);
        gradients[i] *= weights_[i];
        hessians[i] = c_ * c_ / ((std::fabs(x) + c_) * (std::fabs(x) + c_));
        hessians[i] *= weights_[i];
      }
    }
  }

  const char* GetName() const override {
    return "fair";
  }

private:
  /*! \brief Number of data */
  data_size_t num_data_;
  /*! \brief Pointer of label */
  const float* label_;
  /*! \brief Pointer of weights */
  const float* weights_;
  /*! \brief c for Fair loss */
Guolin Ke's avatar
Guolin Ke committed
231
  score_t c_;
Tsukasa OMOTO's avatar
Tsukasa OMOTO committed
232
233
};

}  // namespace LightGBM
#endif   // LIGHTGBM_OBJECTIVE_REGRESSION_OBJECTIVE_HPP_