/*!
 * Copyright (c) 2016 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for license information.
 */
#ifndef LIGHTGBM_OBJECTIVE_REGRESSION_OBJECTIVE_HPP_
#define LIGHTGBM_OBJECTIVE_REGRESSION_OBJECTIVE_HPP_

#include <LightGBM/meta.h>
#include <LightGBM/objective_function.h>
#include <LightGBM/utils/array_args.h>

#include <string>
#include <algorithm>
#include <cmath>
#include <functional>
#include <sstream>
#include <vector>

namespace LightGBM {

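/*!
* \brief Compute the alpha-quantile of the cnt_data values produced by data_reader.
*        Instead of fully sorting, it selects around position (1 - alpha) * cnt_data
*        with ArrayArgs<T>::ArgMaxAtK and linearly interpolates between the two
*        neighboring order statistics.
*/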
#define PercentileFun(T, data_reader, cnt_data, alpha)                    \
  {                                                                       \
    if (cnt_data <= 1) {                                                  \
      return data_reader(0);                                              \
    }                                                                     \
    std::vector<T> ref_data(cnt_data);                                    \
    for (data_size_t i = 0; i < cnt_data; ++i) {                          \
      ref_data[i] = data_reader(i);                                       \
    }                                                                     \
    const double float_pos = (1.0f - alpha) * cnt_data;                   \
    const data_size_t pos = static_cast<data_size_t>(float_pos);          \
    if (pos < 1) {                                                        \
      return ref_data[ArrayArgs<T>::ArgMax(ref_data)];                    \
    } else if (pos >= cnt_data) {                                         \
      return ref_data[ArrayArgs<T>::ArgMin(ref_data)];                    \
    } else {                                                              \
      const double bias = float_pos - pos;                                \
      if (pos > cnt_data / 2) {                                           \
        ArrayArgs<T>::ArgMaxAtK(&ref_data, 0, cnt_data, pos - 1);         \
        T v1 = ref_data[pos - 1];                                         \
        T v2 = ref_data[pos + ArrayArgs<T>::ArgMax(ref_data.data() + pos, \
                                                   cnt_data - pos)];      \
        return static_cast<T>(v1 - (v1 - v2) * bias);                     \
      } else {                                                            \
        ArrayArgs<T>::ArgMaxAtK(&ref_data, 0, cnt_data, pos);             \
        T v2 = ref_data[pos];                                             \
        T v1 = ref_data[ArrayArgs<T>::ArgMin(ref_data.data(), pos)];      \
        return static_cast<T>(v1 - (v1 - v2) * bias);                     \
      }                                                                   \
    }                                                                     \
  }\

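/*!
* \brief Weighted counterpart of PercentileFun: sorts indices by value, builds the
*        cumulative weight distribution, and returns the value where the weighted
*        CDF crosses alpha * total_weight, interpolating between neighbors when the
*        crossing interval carries enough weight.
*/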
#define WeightedPercentileFun(T, data_reader, weight_reader, cnt_data, alpha) \
  {                                                                           \
    if (cnt_data <= 1) {                                                      \
      return data_reader(0);                                                  \
    }                                                                         \
    std::vector<data_size_t> sorted_idx(cnt_data);                            \
    for (data_size_t i = 0; i < cnt_data; ++i) {                              \
      sorted_idx[i] = i;                                                      \
    }                                                                         \
    std::stable_sort(sorted_idx.begin(), sorted_idx.end(),                    \
                     [&](data_size_t a, data_size_t b) {                      \
                       return data_reader(a) < data_reader(b);                \
                     });                                                      \
    std::vector<double> weighted_cdf(cnt_data);                               \
    weighted_cdf[0] = weight_reader(sorted_idx[0]);                           \
    for (data_size_t i = 1; i < cnt_data; ++i) {                              \
      weighted_cdf[i] = weighted_cdf[i - 1] + weight_reader(sorted_idx[i]);   \
    }                                                                         \
    double threshold = weighted_cdf[cnt_data - 1] * alpha;                    \
    size_t pos = std::upper_bound(weighted_cdf.begin(), weighted_cdf.end(),   \
                                  threshold) -                                \
                 weighted_cdf.begin();                                        \
    pos = std::min(pos, static_cast<size_t>(cnt_data - 1));                   \
    if (pos == 0 || pos == static_cast<size_t>(cnt_data - 1)) {               \
      return data_reader(sorted_idx[pos]);                                    \
    }                                                                         \
    CHECK_GE(threshold, weighted_cdf[pos - 1]);                               \
    CHECK_LT(threshold, weighted_cdf[pos]);                                   \
    T v1 = data_reader(sorted_idx[pos - 1]);                                  \
    T v2 = data_reader(sorted_idx[pos]);                                      \
    if (weighted_cdf[pos + 1] - weighted_cdf[pos] >= 1.0f) {                  \
      return static_cast<T>((threshold - weighted_cdf[pos]) /                 \
                                (weighted_cdf[pos + 1] - weighted_cdf[pos]) * \
                                (v2 - v1) +                                   \
                            v1);                                              \
    } else {                                                                  \
      return static_cast<T>(v2);                                              \
    }                                                                         \
  }\

/*!
* \brief Objective function for regression
*/
class RegressionL2loss: public ObjectiveFunction {
 public:
  explicit RegressionL2loss(const Config& config) {
    sqrt_ = config.reg_sqrt;
  }

  explicit RegressionL2loss(const std::vector<std::string>& strs) {
    sqrt_ = false;
    for (auto str : strs) {
      if (str == std::string("sqrt")) {
        sqrt_ = true;
      }
    }
  }

  ~RegressionL2loss() {
  }

  void Init(const Metadata& metadata, data_size_t num_data) override {
    num_data_ = num_data;
    label_ = metadata.label();
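    // With reg_sqrt enabled, the model is fit on sign(y) * sqrt(|y|); ConvertOutput()
    // squares predictions back to the original label scale.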
    if (sqrt_) {
      trans_label_.resize(num_data_);
      #pragma omp parallel for schedule(static)
      for (data_size_t i = 0; i < num_data; ++i) {
        trans_label_[i] = Common::Sign(label_[i]) * std::sqrt(std::fabs(label_[i]));
      }
      label_ = trans_label_.data();
    }
    weights_ = metadata.weights();
  }

  void GetGradients(const double* score, score_t* gradients,
                    score_t* hessians) const override {
    if (weights_ == nullptr) {
      #pragma omp parallel for schedule(static)
      for (data_size_t i = 0; i < num_data_; ++i) {
        gradients[i] = static_cast<score_t>(score[i] - label_[i]);
        hessians[i] = 1.0f;
      }
    } else {
      #pragma omp parallel for schedule(static)
      for (data_size_t i = 0; i < num_data_; ++i) {
        gradients[i] = static_cast<score_t>((score[i] - label_[i]) * weights_[i]);
        hessians[i] = static_cast<score_t>(weights_[i]);
      }
    }
  }

  const char* GetName() const override {
    return "regression";
  }

  void ConvertOutput(const double* input, double* output) const override {
    if (sqrt_) {
      output[0] = Common::Sign(input[0]) * input[0] * input[0];
    } else {
      output[0] = input[0];
    }
  }

  std::string ToString() const override {
    std::stringstream str_buf;
    str_buf << GetName();
    if (sqrt_) {
      str_buf << " sqrt";
    }
    return str_buf.str();
  }

  bool IsConstantHessian() const override {
    if (weights_ == nullptr) {
      return true;
    } else {
      return false;
    }
  }

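  // The optimal constant prediction under squared error is the (weighted) mean of the labels.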
  double BoostFromScore(int) const override {
    double suml = 0.0f;
    double sumw = 0.0f;
    if (weights_ != nullptr) {
      #pragma omp parallel for schedule(static) reduction(+:suml, sumw)
      for (data_size_t i = 0; i < num_data_; ++i) {
        suml += label_[i] * weights_[i];
        sumw += weights_[i];
      }
    } else {
      sumw = static_cast<double>(num_data_);
      #pragma omp parallel for schedule(static) reduction(+:suml)
      for (data_size_t i = 0; i < num_data_; ++i) {
        suml += label_[i];
      }
    }
    return suml / sumw;
  }

 protected:
  bool sqrt_;
  /*! \brief Number of data */
  data_size_t num_data_;
  /*! \brief Pointer of label */
  const label_t* label_;
  /*! \brief Pointer of weights */
  const label_t* weights_;
  std::vector<label_t> trans_label_;
};

/*!
* \brief L1 regression loss
*/
class RegressionL1loss: public RegressionL2loss {
 public:
  explicit RegressionL1loss(const Config& config): RegressionL2loss(config) {
  }

  explicit RegressionL1loss(const std::vector<std::string>& strs): RegressionL2loss(strs) {
  }

  ~RegressionL1loss() {}

  void GetGradients(const double* score, score_t* gradients,
                    score_t* hessians) const override {
    if (weights_ == nullptr) {
      #pragma omp parallel for schedule(static)
      for (data_size_t i = 0; i < num_data_; ++i) {
        const double diff = score[i] - label_[i];
        gradients[i] = static_cast<score_t>(Common::Sign(diff));
        hessians[i] = 1.0f;
      }
    } else {
      #pragma omp parallel for schedule(static)
      for (data_size_t i = 0; i < num_data_; ++i) {
        const double diff = score[i] - label_[i];
        gradients[i] = static_cast<score_t>(Common::Sign(diff) * weights_[i]);
        hessians[i] = static_cast<score_t>(weights_[i]);
      }
    }
  }

  double BoostFromScore(int) const override {
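    // The optimal constant under absolute error is the (weighted) median, i.e. the 0.5-quantile.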
    const double alpha = 0.5;
    if (weights_ != nullptr) {
      #define data_reader(i) (label_[i])
      #define weight_reader(i) (weights_[i])
      WeightedPercentileFun(label_t, data_reader, weight_reader, num_data_, alpha);
      #undef data_reader
      #undef weight_reader
    } else {
      #define data_reader(i) (label_[i])
      PercentileFun(label_t, data_reader, num_data_, alpha);
      #undef data_reader
    }
  }

  bool IsRenewTreeOutput() const override { return true; }

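  // Renew each leaf output to the (weighted) median of the residuals in that leaf:
  // the sign gradients above give only the direction, not the L1-optimal leaf value.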
  double RenewTreeOutput(double, std::function<double(const label_t*, int)> residual_getter,
                         const data_size_t* index_mapper,
                         const data_size_t* bagging_mapper,
                         data_size_t num_data_in_leaf) const override {
    const double alpha = 0.5;
    if (weights_ == nullptr) {
      if (bagging_mapper == nullptr) {
        #define data_reader(i) (residual_getter(label_, index_mapper[i]))
        PercentileFun(double, data_reader, num_data_in_leaf, alpha);
        #undef data_reader
      } else {
        #define data_reader(i) (residual_getter(label_, bagging_mapper[index_mapper[i]]))
        PercentileFun(double, data_reader, num_data_in_leaf, alpha);
        #undef data_reader
      }
    } else {
      if (bagging_mapper == nullptr) {
        #define data_reader(i) (residual_getter(label_, index_mapper[i]))
        #define weight_reader(i) (weights_[index_mapper[i]])
        WeightedPercentileFun(double, data_reader, weight_reader, num_data_in_leaf, alpha);
        #undef data_reader
        #undef weight_reader
      } else {
        #define data_reader(i) (residual_getter(label_, bagging_mapper[index_mapper[i]]))
        #define weight_reader(i) (weights_[bagging_mapper[index_mapper[i]]])
        WeightedPercentileFun(double, data_reader, weight_reader, num_data_in_leaf, alpha);
        #undef data_reader
        #undef weight_reader
      }
    }
  }

  const char* GetName() const override {
    return "regression_l1";
  }
};

/*!
* \brief Huber regression loss
*/
class RegressionHuberLoss: public RegressionL2loss {
 public:
  explicit RegressionHuberLoss(const Config& config): RegressionL2loss(config) {
    alpha_ = static_cast<double>(config.alpha);
    if (sqrt_) {
      Log::Warning("Cannot use sqrt transform in %s Regression, will auto disable it", GetName());
      sqrt_ = false;
    }
  }

  explicit RegressionHuberLoss(const std::vector<std::string>& strs): RegressionL2loss(strs) {
    if (sqrt_) {
      Log::Warning("Cannot use sqrt transform in %s Regression, will auto disable it", GetName());
      sqrt_ = false;
    }
  }

  ~RegressionHuberLoss() {
  }

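  // Gradient is the raw residual inside the |score - label| <= alpha band and is
  // clipped to alpha * sign(residual) outside it; the hessian is approximated by 1.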
  void GetGradients(const double* score, score_t* gradients,
                    score_t* hessians) const override {
    if (weights_ == nullptr) {
      #pragma omp parallel for schedule(static)
      for (data_size_t i = 0; i < num_data_; ++i) {
        const double diff = score[i] - label_[i];
        if (std::abs(diff) <= alpha_) {
          gradients[i] = static_cast<score_t>(diff);
        } else {
          gradients[i] = static_cast<score_t>(Common::Sign(diff) * alpha_);
        }
        hessians[i] = 1.0f;
      }
    } else {
      #pragma omp parallel for schedule(static)
      for (data_size_t i = 0; i < num_data_; ++i) {
        const double diff = score[i] - label_[i];
        if (std::abs(diff) <= alpha_) {
          gradients[i] = static_cast<score_t>(diff * weights_[i]);
        } else {
          gradients[i] = static_cast<score_t>(Common::Sign(diff) * weights_[i] * alpha_);
        }
        hessians[i] = static_cast<score_t>(weights_[i]);
      }
    }
  }

  const char* GetName() const override {
    return "huber";
  }

 private:
  /*! \brief delta for Huber loss */
  double alpha_;
};


// http://research.microsoft.com/en-us/um/people/zhang/INRIA/Publis/Tutorial-Estim/node24.html
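/*!
* \brief Fair regression loss: loss(x) = c^2 * (|x| / c - log(1 + |x| / c)),
*        so grad = c * x / (|x| + c) and hess = c^2 / (|x| + c)^2.
*/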
class RegressionFairLoss: public RegressionL2loss {
 public:
  explicit RegressionFairLoss(const Config& config): RegressionL2loss(config) {
    c_ = static_cast<double>(config.fair_c);
  }

  explicit RegressionFairLoss(const std::vector<std::string>& strs): RegressionL2loss(strs) {
  }

  ~RegressionFairLoss() {}

  void GetGradients(const double* score, score_t* gradients,
                    score_t* hessians) const override {
    if (weights_ == nullptr) {
      #pragma omp parallel for schedule(static)
      for (data_size_t i = 0; i < num_data_; ++i) {
        const double x = score[i] - label_[i];
        gradients[i] = static_cast<score_t>(c_ * x / (std::fabs(x) + c_));
        hessians[i] = static_cast<score_t>(c_ * c_ / ((std::fabs(x) + c_) * (std::fabs(x) + c_)));
      }
    } else {
      #pragma omp parallel for schedule(static)
      for (data_size_t i = 0; i < num_data_; ++i) {
        const double x = score[i] - label_[i];
        gradients[i] = static_cast<score_t>(c_ * x / (std::fabs(x) + c_) * weights_[i]);
        hessians[i] = static_cast<score_t>(c_ * c_ / ((std::fabs(x) + c_) * (std::fabs(x) + c_)) * weights_[i]);
      }
    }
  }

  const char* GetName() const override {
    return "fair";
  }

  bool IsConstantHessian() const override {
    return false;
  }

 private:
  /*! \brief c for Fair loss */
  double c_;
};


/*!
* \brief Objective function for Poisson regression
*/
class RegressionPoissonLoss: public RegressionL2loss {
 public:
  explicit RegressionPoissonLoss(const Config& config): RegressionL2loss(config) {
    max_delta_step_ = static_cast<double>(config.poisson_max_delta_step);
    if (sqrt_) {
      Log::Warning("Cannot use sqrt transform in %s Regression, will auto disable it", GetName());
      sqrt_ = false;
    }
  }

  explicit RegressionPoissonLoss(const std::vector<std::string>& strs): RegressionL2loss(strs) {
  }

  ~RegressionPoissonLoss() {}

  void Init(const Metadata& metadata, data_size_t num_data) override {
    if (sqrt_) {
      Log::Warning("Cannot use sqrt transform in %s Regression, will auto disable it", GetName());
      sqrt_ = false;
    }
    RegressionL2loss::Init(metadata, num_data);
    // Safety check of labels
    label_t miny;
    double sumy;
    Common::ObtainMinMaxSum(label_, num_data_, &miny, static_cast<label_t*>(nullptr), &sumy);
    if (miny < 0.0f) {
      Log::Fatal("[%s]: at least one target label is negative", GetName());
    }
    if (sumy == 0.0f) {
      Log::Fatal("[%s]: sum of labels is zero", GetName());
    }
  }

  /* Parametrize with unbounded internal score "f"; then
   *  loss = exp(f) - label * f
   *  grad = exp(f) - label
   *  hess = exp(f)
   *
   * The model output is exp(f), so the associated metric sees s = exp(f)
   * and computes loss = s - label * log(s); a little awkward maybe.
   */
  void GetGradients(const double* score, score_t* gradients,
                    score_t* hessians) const override {
    if (weights_ == nullptr) {
      #pragma omp parallel for schedule(static)
      for (data_size_t i = 0; i < num_data_; ++i) {
        gradients[i] = static_cast<score_t>(std::exp(score[i]) - label_[i]);
        hessians[i] = static_cast<score_t>(std::exp(score[i] + max_delta_step_));
      }
    } else {
      #pragma omp parallel for schedule(static)
      for (data_size_t i = 0; i < num_data_; ++i) {
        gradients[i] = static_cast<score_t>((std::exp(score[i]) - label_[i]) * weights_[i]);
        hessians[i] = static_cast<score_t>(std::exp(score[i] + max_delta_step_) * weights_[i]);
      }
    }
  }

  void ConvertOutput(const double* input, double* output) const override {
    output[0] = std::exp(input[0]);
  }

  const char* GetName() const override {
    return "poisson";
  }

  double BoostFromScore(int) const override {
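    // Optimal constant for the Poisson objective: the log of the (weighted) mean label.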
    return Common::SafeLog(RegressionL2loss::BoostFromScore(0));
  }

  bool IsConstantHessian() const override {
    return false;
  }

 private:
  /*! \brief used to safeguard optimization */
  double max_delta_step_;
};

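/*!
* \brief Objective function for Quantile regression (pinball loss)
*/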
class RegressionQuantileloss : public RegressionL2loss {
 public:
  explicit RegressionQuantileloss(const Config& config): RegressionL2loss(config) {
    alpha_ = static_cast<score_t>(config.alpha);
    CHECK(alpha_ > 0 && alpha_ < 1);
  }

  explicit RegressionQuantileloss(const std::vector<std::string>& strs): RegressionL2loss(strs) {
  }

  ~RegressionQuantileloss() {}

  void GetGradients(const double* score, score_t* gradients,
                    score_t* hessians) const override {
    if (weights_ == nullptr) {
      #pragma omp parallel for schedule(static)
      for (data_size_t i = 0; i < num_data_; ++i) {
        score_t delta = static_cast<score_t>(score[i] - label_[i]);
        if (delta >= 0) {
          gradients[i] = (1.0f - alpha_);
        } else {
          gradients[i] = -alpha_;
        }
        hessians[i] = 1.0f;
      }
    } else {
      #pragma omp parallel for schedule(static)
      for (data_size_t i = 0; i < num_data_; ++i) {
        score_t delta = static_cast<score_t>(score[i] - label_[i]);
        if (delta >= 0) {
          gradients[i] = static_cast<score_t>((1.0f - alpha_) * weights_[i]);
        } else {
          gradients[i] = static_cast<score_t>(-alpha_ * weights_[i]);
        }
        hessians[i] = static_cast<score_t>(weights_[i]);
      }
    }
  }

  const char* GetName() const override {
    return "quantile";
  }

  double BoostFromScore(int) const override {
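    // The optimal constant under pinball loss is the (weighted) alpha-quantile of the labels.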
    if (weights_ != nullptr) {
      #define data_reader(i) (label_[i])
      #define weight_reader(i) (weights_[i])
      WeightedPercentileFun(label_t, data_reader, weight_reader, num_data_, alpha_);
      #undef data_reader
      #undef weight_reader
    } else {
      #define data_reader(i) (label_[i])
      PercentileFun(label_t, data_reader, num_data_, alpha_);
      #undef data_reader
    }
  }

  bool IsRenewTreeOutput() const override { return true; }

  double RenewTreeOutput(double, std::function<double(const label_t*, int)> residual_getter,
                         const data_size_t* index_mapper,
                         const data_size_t* bagging_mapper,
                         data_size_t num_data_in_leaf) const override {
    if (weights_ == nullptr) {
      if (bagging_mapper == nullptr) {
        #define data_reader(i) (residual_getter(label_, index_mapper[i]))
        PercentileFun(double, data_reader, num_data_in_leaf, alpha_);
        #undef data_reader
      } else {
        #define data_reader(i) (residual_getter(label_, bagging_mapper[index_mapper[i]]))
        PercentileFun(double, data_reader, num_data_in_leaf, alpha_);
        #undef data_reader
      }
    } else {
      if (bagging_mapper == nullptr) {
        #define data_reader(i) (residual_getter(label_, index_mapper[i]))
        #define weight_reader(i) (weights_[index_mapper[i]])
        WeightedPercentileFun(double, data_reader, weight_reader, num_data_in_leaf, alpha_);
        #undef data_reader
        #undef weight_reader
      } else {
        #define data_reader(i) (residual_getter(label_, bagging_mapper[index_mapper[i]]))
        #define weight_reader(i) (weights_[bagging_mapper[index_mapper[i]]])
        WeightedPercentileFun(double, data_reader, weight_reader, num_data_in_leaf, alpha_);
        #undef data_reader
        #undef weight_reader
      }
    }
  }

 private:
  score_t alpha_;
};


/*!
* \brief MAPE regression loss
*/
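// MAPE is handled as an L1 loss reweighted by 1 / max(1, |label|) (times the sample
// weight, if any); these per-row weights are precomputed in Init() and reused for
// the gradients, the initial score, and leaf renewal.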
class RegressionMAPELOSS : public RegressionL1loss {
 public:
  explicit RegressionMAPELOSS(const Config& config) : RegressionL1loss(config) {
  }

  explicit RegressionMAPELOSS(const std::vector<std::string>& strs) : RegressionL1loss(strs) {
  }

  ~RegressionMAPELOSS() {}

  void Init(const Metadata& metadata, data_size_t num_data) override {
    RegressionL2loss::Init(metadata, num_data);
    for (data_size_t i = 0; i < num_data_; ++i) {
      if (std::fabs(label_[i]) < 1) {
        Log::Warning("Met 'abs(label) < 1', will convert them to '1' in MAPE objective and metric");
        break;
      }
    }
    label_weight_.resize(num_data);
    if (weights_ == nullptr) {
      #pragma omp parallel for schedule(static)
      for (data_size_t i = 0; i < num_data_; ++i) {
        label_weight_[i] = 1.0f / std::max(1.0f, std::fabs(label_[i]));
      }
    } else {
      #pragma omp parallel for schedule(static)
      for (data_size_t i = 0; i < num_data_; ++i) {
        label_weight_[i] = 1.0f / std::max(1.0f, std::fabs(label_[i])) * weights_[i];
      }
    }
  }

  void GetGradients(const double* score, score_t* gradients,
                    score_t* hessians) const override {
    if (weights_ == nullptr) {
      #pragma omp parallel for schedule(static)
      for (data_size_t i = 0; i < num_data_; ++i) {
        const double diff = score[i] - label_[i];
        gradients[i] = static_cast<score_t>(Common::Sign(diff) * label_weight_[i]);
        hessians[i] = 1.0f;
      }
    } else {
      #pragma omp parallel for schedule(static)
      for (data_size_t i = 0; i < num_data_; ++i) {
        const double diff = score[i] - label_[i];
        gradients[i] = static_cast<score_t>(Common::Sign(diff) * label_weight_[i]);
        hessians[i] = weights_[i];
      }
    }
  }

  double BoostFromScore(int) const override {
    const double alpha = 0.5;
    #define data_reader(i) (label_[i])
    #define weight_reader(i) (label_weight_[i])
    WeightedPercentileFun(label_t, data_reader, weight_reader, num_data_, alpha);
    #undef data_reader
    #undef weight_reader
  }

  bool IsRenewTreeOutput() const override { return true; }

  double RenewTreeOutput(double, std::function<double(const label_t*, int)> residual_getter,
                         const data_size_t* index_mapper,
                         const data_size_t* bagging_mapper,
                         data_size_t num_data_in_leaf) const override {
    const double alpha = 0.5;
    if (bagging_mapper == nullptr) {
      #define data_reader(i) (residual_getter(label_, index_mapper[i]))
      #define weight_reader(i) (label_weight_[index_mapper[i]])
      WeightedPercentileFun(double, data_reader, weight_reader, num_data_in_leaf, alpha);
      #undef data_reader
      #undef weight_reader
    } else {
      #define data_reader(i) (residual_getter(label_, bagging_mapper[index_mapper[i]]))
      #define weight_reader(i) (label_weight_[bagging_mapper[index_mapper[i]]])
      WeightedPercentileFun(double, data_reader, weight_reader, num_data_in_leaf, alpha);
      #undef data_reader
      #undef weight_reader
    }
  }

  const char* GetName() const override {
    return "mape";
  }

  bool IsConstantHessian() const override {
    return true;
  }

 private:
  std::vector<label_t> label_weight_;
};

/*!
* \brief Objective function for Gamma regression
*/
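// With internal score f = log(mu), the Gamma negative log-likelihood is, up to
// constants, label * exp(-f) + f, giving grad = 1 - label * exp(-f) and
// hess = label * exp(-f).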
class RegressionGammaLoss : public RegressionPoissonLoss {
 public:
  explicit RegressionGammaLoss(const Config& config) : RegressionPoissonLoss(config) {
  }

  explicit RegressionGammaLoss(const std::vector<std::string>& strs) : RegressionPoissonLoss(strs) {
  }

  ~RegressionGammaLoss() {}

  void GetGradients(const double* score, score_t* gradients,
                    score_t* hessians) const override {
    if (weights_ == nullptr) {
      #pragma omp parallel for schedule(static)
      for (data_size_t i = 0; i < num_data_; ++i) {
        gradients[i] = static_cast<score_t>(1.0 - label_[i] / std::exp(score[i]));
        hessians[i] = static_cast<score_t>(label_[i] / std::exp(score[i]));
      }
    } else {
      #pragma omp parallel for schedule(static)
      for (data_size_t i = 0; i < num_data_; ++i) {
        gradients[i] = static_cast<score_t>((1.0 - label_[i] / std::exp(score[i])) * weights_[i]);
        hessians[i] = static_cast<score_t>(label_[i] / std::exp(score[i]) * weights_[i]);
      }
    }
  }

  const char* GetName() const override {
    return "gamma";
  }
};

/*!
* \brief Objective function for Tweedie regression
*/
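// With internal score f = log(mu) and variance power rho, the Tweedie deviance is,
// up to constants, -label * exp((1 - rho) * f) / (1 - rho) + exp((2 - rho) * f) / (2 - rho);
// the gradients and hessians below are its first and second derivatives in f.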
class RegressionTweedieLoss: public RegressionPoissonLoss {
 public:
  explicit RegressionTweedieLoss(const Config& config) : RegressionPoissonLoss(config) {
    rho_ = config.tweedie_variance_power;
  }

  explicit RegressionTweedieLoss(const std::vector<std::string>& strs) : RegressionPoissonLoss(strs) {
  }

  ~RegressionTweedieLoss() {}

  void GetGradients(const double* score, score_t* gradients,
                    score_t* hessians) const override {
    if (weights_ == nullptr) {
      #pragma omp parallel for schedule(static)
      for (data_size_t i = 0; i < num_data_; ++i) {
        gradients[i] = static_cast<score_t>(-label_[i] * std::exp((1 - rho_) * score[i]) + std::exp((2 - rho_) * score[i]));
        hessians[i] = static_cast<score_t>(-label_[i] * (1 - rho_) * std::exp((1 - rho_) * score[i]) +
          (2 - rho_) * std::exp((2 - rho_) * score[i]));
      }
    } else {
      #pragma omp parallel for schedule(static)
      for (data_size_t i = 0; i < num_data_; ++i) {
        gradients[i] = static_cast<score_t>((-label_[i] * std::exp((1 - rho_) * score[i]) + std::exp((2 - rho_) * score[i])) * weights_[i]);
        hessians[i] = static_cast<score_t>((-label_[i] * (1 - rho_) * std::exp((1 - rho_) * score[i]) +
          (2 - rho_) * std::exp((2 - rho_) * score[i])) * weights_[i]);
      }
    }
  }

  const char* GetName() const override {
    return "tweedie";
  }

 private:
  double rho_;
};

#undef PercentileFun
#undef WeightedPercentileFun

}  // namespace LightGBM
#endif   // LIGHTGBM_OBJECTIVE_REGRESSION_OBJECTIVE_HPP_