feature_histogram.hpp 45.1 KB
Newer Older
1
2
/*!
 * Copyright (c) 2016 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for
 * license information.
 */
Guolin Ke's avatar
Guolin Ke committed
6
7
8
#ifndef LIGHTGBM_TREELEARNER_FEATURE_HISTOGRAM_HPP_
#define LIGHTGBM_TREELEARNER_FEATURE_HISTOGRAM_HPP_

9
#include <LightGBM/bin.h>
Guolin Ke's avatar
Guolin Ke committed
10
#include <LightGBM/dataset.h>
11
#include <LightGBM/utils/array_args.h>
Guolin Ke's avatar
Guolin Ke committed
12

13
#include <algorithm>
14
#include <cmath>
15
16
17
18
19
#include <cstring>
#include <memory>
#include <utility>
#include <vector>

20
#include "monotone_constraints.hpp"
Nikita Titov's avatar
Nikita Titov committed
21
#include "split_info.hpp"
Guolin Ke's avatar
Guolin Ke committed
22

23
namespace LightGBM {
Guolin Ke's avatar
Guolin Ke committed
24

Guolin Ke's avatar
Guolin Ke committed
25
/*! \brief Per-feature metadata shared by every histogram built for one feature. */
class FeatureMetainfo {
 public:
  /*! \brief Number of bins for this feature */
  int num_bin;
  /*! \brief How missing values are encoded (None / Zero / NaN) */
  MissingType missing_type;
  /*! \brief Leading bins not stored in the histogram buffer; sizes below use
   *         (num_bin - offset) */
  int8_t offset = 0;
  /*! \brief Bin index of the default value (skipped when missing_type is Zero) */
  uint32_t default_bin;
  /*! \brief Monotone constraint for this feature; 0 means unconstrained
   *         (presumably -1/+1 for decreasing/increasing — confirm with config) */
  int8_t monotone_type = 0;
  /*! \brief Multiplier applied to this feature's split gain after the search */
  double penalty = 1.0;
  /*! \brief pointer of tree config */
  const Config* config;
  /*! \brief Whether the feature's bins are numerical or categorical */
  BinType bin_type;
  /*! \brief random number generator for extremely randomized trees */
  mutable Random rand;
};
Guolin Ke's avatar
Guolin Ke committed
39
/*!
40
41
42
 * \brief FeatureHistogram is used to construct and store a histogram for a
 * feature.
 */
Guolin Ke's avatar
Guolin Ke committed
43
class FeatureHistogram {
44
 public:
45
  FeatureHistogram() { data_ = nullptr; }
Guolin Ke's avatar
Guolin Ke committed
46

47
  ~FeatureHistogram() {}
Guolin Ke's avatar
Guolin Ke committed
48

Guolin Ke's avatar
Guolin Ke committed
49
50
51
52
53
  /*! \brief Disable copy */
  FeatureHistogram& operator=(const FeatureHistogram&) = delete;
  /*! \brief Disable copy */
  FeatureHistogram(const FeatureHistogram&) = delete;

Guolin Ke's avatar
Guolin Ke committed
54
  /*!
55
56
57
58
   * \brief Init the feature histogram
   * \param feature the feature data for this histogram
   * \param min_num_data_one_leaf minimal number of data in one leaf
   */
59
  /*!
   * \brief Bind this histogram to its storage and per-feature metadata,
   *        then pick the matching threshold-search routine.
   * \param data pointer to the histogram buffer (gradient/hessian entries)
   * \param meta shared metadata describing the feature
   */
  void Init(hist_t* data, const FeatureMetainfo* meta) {
    data_ = data;
    meta_ = meta;
    ResetFunc();
  }

  void ResetFunc() {
66
    if (meta_->bin_type == BinType::NumericalBin) {
67
      FuncForNumrical();
68
    } else {
69
      FuncForCategorical();
70
    }
Guolin Ke's avatar
Guolin Ke committed
71
72
  }

73
  /*! \brief Direct access to the underlying histogram buffer. */
  hist_t* RawData() { return data_; }
74

Guolin Ke's avatar
Guolin Ke committed
75
  /*!
76
77
78
   * \brief Subtract current histograms with other
   * \param other The histogram that want to subtract
   */
Guolin Ke's avatar
Guolin Ke committed
79
  void Subtract(const FeatureHistogram& other) {
80
81
    for (int i = 0; i < (meta_->num_bin - meta_->offset) * 2; ++i) {
      data_[i] -= other.data_[i];
Guolin Ke's avatar
Guolin Ke committed
82
83
    }
  }
84

85
86
87
88
  void FindBestThreshold(double sum_gradient, double sum_hessian,
                         data_size_t num_data,
                         const ConstraintEntry& constraints,
                         SplitInfo* output) {
Guolin Ke's avatar
Guolin Ke committed
89
    output->default_left = true;
Guolin Ke's avatar
Guolin Ke committed
90
    output->gain = kMinScore;
91
92
    find_best_threshold_fun_(sum_gradient, sum_hessian + 2 * kEpsilon, num_data,
                             constraints, output);
Guolin Ke's avatar
Guolin Ke committed
93
    output->gain *= meta_->penalty;
94
95
  }

96
97
98
  template <bool USE_RAND, bool USE_L1, bool USE_MAX_OUTPUT>
  double BeforeNumercal(double sum_gradient, double sum_hessian,
                        SplitInfo* output, int* rand_threshold) {
Guolin Ke's avatar
Guolin Ke committed
99
    is_splittable_ = false;
100
101
102
103
104
105
106
107
108
    output->monotone_type = meta_->monotone_type;
    double gain_shift = GetLeafGain<USE_L1, USE_MAX_OUTPUT>(
        sum_gradient, sum_hessian, meta_->config->lambda_l1,
        meta_->config->lambda_l2, meta_->config->max_delta_step);
    *rand_threshold = 0;
    if (USE_RAND) {
      if (meta_->num_bin - 2 > 0) {
        *rand_threshold = meta_->rand.NextInt(0, meta_->num_bin - 2);
      }
109
    }
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
    return gain_shift + meta_->config->min_gain_to_split;
  }

  /*! \brief Lift extra_trees / monotone-constraint flags to template
   *         parameters for the numerical search. */
  void FuncForNumrical() {
    const bool use_mc = !meta_->config->monotone_constraints.empty();
    if (meta_->config->extra_trees) {
      if (use_mc) {
        FuncForNumricalL1<true, true>();
      } else {
        FuncForNumricalL1<true, false>();
      }
    } else {
      if (use_mc) {
        FuncForNumricalL1<false, true>();
      } else {
        FuncForNumricalL1<false, false>();
      }
    }
  }
  /*! \brief Lift L1 / max_delta_step flags to template parameters. */
  template <bool USE_RAND, bool USE_MC>
  void FuncForNumricalL1() {
    const bool use_l1 = meta_->config->lambda_l1 > 0;
    const bool use_max_output = meta_->config->max_delta_step > 0;
    if (use_l1) {
      if (use_max_output) {
        FuncForNumricalL2<USE_RAND, USE_MC, true, true>();
      } else {
        FuncForNumricalL2<USE_RAND, USE_MC, true, false>();
      }
    } else {
      if (use_max_output) {
        FuncForNumricalL2<USE_RAND, USE_MC, false, true>();
      } else {
        FuncForNumricalL2<USE_RAND, USE_MC, false, false>();
      }
    }
  }

  /*!
   * \brief Install the fully specialized numerical search into
   *        find_best_threshold_fun_, based on bin count and missing type.
   *
   * The three trailing bool template arguments of
   * FindBestThresholdSequentially appear to select scan direction,
   * default-bin skipping, and NA-as-missing handling respectively
   * (NOTE(review): inferred from usage here — confirm against its
   * declaration, which is outside this view).
   */
  template <bool USE_RAND, bool USE_MC, bool USE_L1, bool USE_MAX_OUTPUT>
  void FuncForNumricalL2() {
    if (meta_->num_bin > 2 && meta_->missing_type != MissingType::None) {
      if (meta_->missing_type == MissingType::Zero) {
        // Missing encoded as zero: scan in both directions, skipping the
        // default (zero) bin.
        find_best_threshold_fun_ =
            [=](double sum_gradient, double sum_hessian, data_size_t num_data,
                const ConstraintEntry& constraints, SplitInfo* output) {
              int rand_threshold = 0;
              double min_gain_shift =
                  BeforeNumercal<USE_RAND, USE_L1, USE_MAX_OUTPUT>(
                      sum_gradient, sum_hessian, output, &rand_threshold);
              FindBestThresholdSequentially<USE_RAND, USE_MC, USE_L1,
                                            USE_MAX_OUTPUT, true, true, false>(
                  sum_gradient, sum_hessian, num_data, constraints,
                  min_gain_shift, output, rand_threshold);
              FindBestThresholdSequentially<USE_RAND, USE_MC, USE_L1,
                                            USE_MAX_OUTPUT, false, true, false>(
                  sum_gradient, sum_hessian, num_data, constraints,
                  min_gain_shift, output, rand_threshold);
            };
      } else {
        // Missing encoded as NaN: scan in both directions, treating the NA
        // bin as missing.
        find_best_threshold_fun_ =
            [=](double sum_gradient, double sum_hessian, data_size_t num_data,
                const ConstraintEntry& constraints, SplitInfo* output) {
              int rand_threshold = 0;
              double min_gain_shift =
                  BeforeNumercal<USE_RAND, USE_L1, USE_MAX_OUTPUT>(
                      sum_gradient, sum_hessian, output, &rand_threshold);
              FindBestThresholdSequentially<USE_RAND, USE_MC, USE_L1,
                                            USE_MAX_OUTPUT, true, false, true>(
                  sum_gradient, sum_hessian, num_data, constraints,
                  min_gain_shift, output, rand_threshold);
              FindBestThresholdSequentially<USE_RAND, USE_MC, USE_L1,
                                            USE_MAX_OUTPUT, false, false, true>(
                  sum_gradient, sum_hessian, num_data, constraints,
                  min_gain_shift, output, rand_threshold);
            };
      }
    } else {
      // No usable missing representation, or too few bins: one scan suffices.
      if (meta_->missing_type != MissingType::NaN) {
        find_best_threshold_fun_ =
            [=](double sum_gradient, double sum_hessian, data_size_t num_data,
                const ConstraintEntry& constraints, SplitInfo* output) {
              int rand_threshold = 0;
              double min_gain_shift =
                  BeforeNumercal<USE_RAND, USE_L1, USE_MAX_OUTPUT>(
                      sum_gradient, sum_hessian, output, &rand_threshold);
              FindBestThresholdSequentially<USE_RAND, USE_MC, USE_L1,
                                            USE_MAX_OUTPUT, true, false, false>(
                  sum_gradient, sum_hessian, num_data, constraints,
                  min_gain_shift, output, rand_threshold);
            };
      } else {
        // MissingType::NaN with <= 2 bins: same single scan, but missing
        // values must not default to the left child.
        find_best_threshold_fun_ =
            [=](double sum_gradient, double sum_hessian, data_size_t num_data,
                const ConstraintEntry& constraints, SplitInfo* output) {
              int rand_threshold = 0;
              double min_gain_shift =
                  BeforeNumercal<USE_RAND, USE_L1, USE_MAX_OUTPUT>(
                      sum_gradient, sum_hessian, output, &rand_threshold);
              FindBestThresholdSequentially<USE_RAND, USE_MC, USE_L1,
                                            USE_MAX_OUTPUT, true, false, false>(
                  sum_gradient, sum_hessian, num_data, constraints,
                  min_gain_shift, output, rand_threshold);
              output->default_left = false;
            };
      }
    }
  }
214

215
  /*! \brief Lift extra_trees / monotone-constraint flags to template
   *         parameters for the categorical search. */
  void FuncForCategorical() {
    const bool use_mc = !meta_->config->monotone_constraints.empty();
    if (meta_->config->extra_trees) {
      if (use_mc) {
        FuncForCategoricalL1<true, true>();
      } else {
        FuncForCategoricalL1<true, false>();
      }
    } else {
      if (use_mc) {
        FuncForCategoricalL1<false, true>();
      } else {
        FuncForCategoricalL1<false, false>();
      }
    }
  }
  template <bool USE_RAND, bool USE_MC>
  void FuncForCategoricalL1() {
    if (meta_->config->lambda_l1 > 0) {
      if (meta_->config->max_delta_step > 0) {
        find_best_threshold_fun_ =
            std::bind(&FeatureHistogram::FindBestThresholdCategoricalInner<
                          USE_RAND, USE_MC, true, true>,
                      this, std::placeholders::_1, std::placeholders::_2,
                      std::placeholders::_3, std::placeholders::_4,
                      std::placeholders::_5);
      } else {
        find_best_threshold_fun_ =
            std::bind(&FeatureHistogram::FindBestThresholdCategoricalInner<
                          USE_RAND, USE_MC, true, false>,
                      this, std::placeholders::_1, std::placeholders::_2,
                      std::placeholders::_3, std::placeholders::_4,
                      std::placeholders::_5);
      }
248
    } else {
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
      if (meta_->config->max_delta_step > 0) {
        find_best_threshold_fun_ =
            std::bind(&FeatureHistogram::FindBestThresholdCategoricalInner<
                          USE_RAND, USE_MC, false, true>,
                      this, std::placeholders::_1, std::placeholders::_2,
                      std::placeholders::_3, std::placeholders::_4,
                      std::placeholders::_5);
      } else {
        find_best_threshold_fun_ =
            std::bind(&FeatureHistogram::FindBestThresholdCategoricalInner<
                          USE_RAND, USE_MC, false, false>,
                      this, std::placeholders::_1, std::placeholders::_2,
                      std::placeholders::_3, std::placeholders::_4,
                      std::placeholders::_5);
      }
264
265
266
    }
  }

267
268
269
270
271
272
273
  /*!
   * \brief Categorical split search.
   *
   * Two strategies, chosen by max_cat_to_onehot:
   *  - one-hot: test each category as its own left child;
   *  - sorted: order categories by gradient/hessian ratio and scan prefixes
   *    from both ends, like a numerical feature.
   * On success, fills output with leaf outputs, counts, sums, gain, and the
   * chosen category set.
   */
  template <bool USE_RAND, bool USE_MC, bool USE_L1, bool USE_MAX_OUTPUT>
  void FindBestThresholdCategoricalInner(double sum_gradient,
                                         double sum_hessian,
                                         data_size_t num_data,
                                         const ConstraintEntry& constraints,
                                         SplitInfo* output) {
    is_splittable_ = false;
    output->default_left = false;
    double best_gain = kMinScore;
    data_size_t best_left_count = 0;
    double best_sum_left_gradient = 0;
    double best_sum_left_hessian = 0;
    double gain_shift = GetLeafGain<USE_L1, USE_MAX_OUTPUT>(
        sum_gradient, sum_hessian, meta_->config->lambda_l1,
        meta_->config->lambda_l2, meta_->config->max_delta_step);
    double min_gain_shift = gain_shift + meta_->config->min_gain_to_split;
    bool is_full_categorical = meta_->missing_type == MissingType::None;
    // With missing values, the last bin is reserved; only scan real bins.
    int used_bin = meta_->num_bin - 1 + is_full_categorical;
    std::vector<int> sorted_idx;
    double l2 = meta_->config->lambda_l2;
    bool use_onehot = meta_->num_bin <= meta_->config->max_cat_to_onehot;
    int best_threshold = -1;
    int best_dir = 1;
    // Approximate per-bin data counts from hessians.
    const double cnt_factor = num_data / sum_hessian;
    int rand_threshold = 0;
    if (use_onehot) {
      if (USE_RAND) {
        if (used_bin > 0) {
          rand_threshold = meta_->rand.NextInt(0, used_bin);
        }
      }
      for (int t = 0; t < used_bin; ++t) {
        const auto grad = GET_GRAD(data_, t);
        const auto hess = GET_HESS(data_, t);
        data_size_t cnt =
            static_cast<data_size_t>(Common::RoundInt(hess * cnt_factor));
        // if data not enough, or sum hessian too small
        if (cnt < meta_->config->min_data_in_leaf ||
            hess < meta_->config->min_sum_hessian_in_leaf) {
          continue;
        }
        data_size_t other_count = num_data - cnt;
        // if data not enough
        if (other_count < meta_->config->min_data_in_leaf) {
          continue;
        }
        double sum_other_hessian = sum_hessian - hess - kEpsilon;
        // if sum hessian too small
        if (sum_other_hessian < meta_->config->min_sum_hessian_in_leaf) {
          continue;
        }
        double sum_other_gradient = sum_gradient - grad;
        // Extra-trees mode: only evaluate the randomly chosen category.
        if (USE_RAND) {
          if (t != rand_threshold) {
            continue;
          }
        }
        // current split gain
        double current_gain = GetSplitGains<USE_MC, USE_L1, USE_MAX_OUTPUT>(
            sum_other_gradient, sum_other_hessian, grad, hess + kEpsilon,
            meta_->config->lambda_l1, l2, meta_->config->max_delta_step,
            constraints, 0);
        // gain with split is worse than without split
        if (current_gain <= min_gain_shift) {
          continue;
        }
        // mark to is splittable
        is_splittable_ = true;
        // better split point
        if (current_gain > best_gain) {
          best_threshold = t;
          best_sum_left_gradient = grad;
          best_sum_left_hessian = hess + kEpsilon;
          best_left_count = cnt;
          best_gain = current_gain;
        }
      }
    } else {
      // Keep only categories with enough (smoothed) weight, then sort them
      // by smoothed gradient/hessian ratio.
      for (int i = 0; i < used_bin; ++i) {
        if (Common::RoundInt(GET_HESS(data_, i) * cnt_factor) >=
            meta_->config->cat_smooth) {
          sorted_idx.push_back(i);
        }
      }
      used_bin = static_cast<int>(sorted_idx.size());
      l2 += meta_->config->cat_l2;
      auto ctr_fun = [this](double sum_grad, double sum_hess) {
        return (sum_grad) / (sum_hess + meta_->config->cat_smooth);
      };
      std::sort(sorted_idx.begin(), sorted_idx.end(),
                [this, &ctr_fun](int i, int j) {
                  return ctr_fun(GET_GRAD(data_, i), GET_HESS(data_, i)) <
                         ctr_fun(GET_GRAD(data_, j), GET_HESS(data_, j));
                });
      // Scan prefixes from both ends of the sorted order.
      std::vector<int> find_direction(1, 1);
      std::vector<int> start_position(1, 0);
      find_direction.push_back(-1);
      start_position.push_back(used_bin - 1);
      const int max_num_cat =
          std::min(meta_->config->max_cat_threshold, (used_bin + 1) / 2);
      int max_threshold = std::max(std::min(max_num_cat, used_bin) - 1, 0);
      if (USE_RAND) {
        if (max_threshold > 0) {
          rand_threshold = meta_->rand.NextInt(0, max_threshold);
        }
      }
      is_splittable_ = false;
      for (size_t out_i = 0; out_i < find_direction.size(); ++out_i) {
        auto dir = find_direction[out_i];
        auto start_pos = start_position[out_i];
        data_size_t min_data_per_group = meta_->config->min_data_per_group;
        data_size_t cnt_cur_group = 0;
        double sum_left_gradient = 0.0f;
        double sum_left_hessian = kEpsilon;
        data_size_t left_count = 0;
        for (int i = 0; i < used_bin && i < max_num_cat; ++i) {
          auto t = sorted_idx[start_pos];
          start_pos += dir;
          const auto grad = GET_GRAD(data_, t);
          const auto hess = GET_HESS(data_, t);
          data_size_t cnt =
              static_cast<data_size_t>(Common::RoundInt(hess * cnt_factor));
          // Grow the left (prefix) side by one category.
          sum_left_gradient += grad;
          sum_left_hessian += hess;
          left_count += cnt;
          cnt_cur_group += cnt;
          if (left_count < meta_->config->min_data_in_leaf ||
              sum_left_hessian < meta_->config->min_sum_hessian_in_leaf) {
            continue;
          }
          data_size_t right_count = num_data - left_count;
          // Right side only shrinks from here on, so these are terminal.
          if (right_count < meta_->config->min_data_in_leaf ||
              right_count < min_data_per_group) {
            break;
          }
          double sum_right_hessian = sum_hessian - sum_left_hessian;
          if (sum_right_hessian < meta_->config->min_sum_hessian_in_leaf) {
            break;
          }
          if (cnt_cur_group < min_data_per_group) {
            continue;
          }
          cnt_cur_group = 0;
          double sum_right_gradient = sum_gradient - sum_left_gradient;
          // Extra-trees mode: only evaluate the randomly chosen prefix size.
          if (USE_RAND) {
            if (i != rand_threshold) {
              continue;
            }
          }
          double current_gain = GetSplitGains<USE_MC, USE_L1, USE_MAX_OUTPUT>(
              sum_left_gradient, sum_left_hessian, sum_right_gradient,
              sum_right_hessian, meta_->config->lambda_l1, l2,
              meta_->config->max_delta_step, constraints, 0);
          if (current_gain <= min_gain_shift) {
            continue;
          }
          is_splittable_ = true;
          if (current_gain > best_gain) {
            best_left_count = left_count;
            best_sum_left_gradient = sum_left_gradient;
            best_sum_left_hessian = sum_left_hessian;
            best_threshold = i;
            best_gain = current_gain;
            best_dir = dir;
          }
        }
      }
    }
    if (is_splittable_) {
      output->left_output =
          CalculateSplittedLeafOutput<USE_MC, USE_L1, USE_MAX_OUTPUT>(
              best_sum_left_gradient, best_sum_left_hessian,
              meta_->config->lambda_l1, l2, meta_->config->max_delta_step,
              constraints);
      output->left_count = best_left_count;
      output->left_sum_gradient = best_sum_left_gradient;
      output->left_sum_hessian = best_sum_left_hessian - kEpsilon;
      output->right_output =
          CalculateSplittedLeafOutput<USE_MC, USE_L1, USE_MAX_OUTPUT>(
              sum_gradient - best_sum_left_gradient,
              sum_hessian - best_sum_left_hessian, meta_->config->lambda_l1, l2,
              meta_->config->max_delta_step, constraints);
      output->right_count = num_data - best_left_count;
      output->right_sum_gradient = sum_gradient - best_sum_left_gradient;
      output->right_sum_hessian =
          sum_hessian - best_sum_left_hessian - kEpsilon;
      output->gain = best_gain - min_gain_shift;
      if (use_onehot) {
        output->num_cat_threshold = 1;
        output->cat_threshold =
            std::vector<uint32_t>(1, static_cast<uint32_t>(best_threshold));
      } else {
        // best_threshold is a prefix length; materialize the category set,
        // reading sorted_idx from the winning direction.
        output->num_cat_threshold = best_threshold + 1;
        output->cat_threshold =
            std::vector<uint32_t>(output->num_cat_threshold);
        if (best_dir == 1) {
          for (int i = 0; i < output->num_cat_threshold; ++i) {
            auto t = sorted_idx[i];
            output->cat_threshold[i] = t;
          }
        } else {
          for (int i = 0; i < output->num_cat_threshold; ++i) {
            auto t = sorted_idx[used_bin - 1 - i];
            output->cat_threshold[i] = t;
          }
        }
      }
      // Categorical splits carry no monotone constraint.
      output->monotone_type = 0;
    }
  }

494
  void GatherInfoForThreshold(double sum_gradient, double sum_hessian,
495
496
                              uint32_t threshold, data_size_t num_data,
                              SplitInfo* output) {
497
    if (meta_->bin_type == BinType::NumericalBin) {
498
499
      GatherInfoForThresholdNumerical(sum_gradient, sum_hessian, threshold,
                                      num_data, output);
500
    } else {
501
502
      GatherInfoForThresholdCategorical(sum_gradient, sum_hessian, threshold,
                                        num_data, output);
503
504
505
506
    }
  }

  /*!
   * \brief Compute the SplitInfo for a forced numerical threshold by
   *        accumulating the bins at/above it (the right child) and deriving
   *        the left child by subtraction.
   */
  void GatherInfoForThresholdNumerical(double sum_gradient, double sum_hessian,
                                       uint32_t threshold, data_size_t num_data,
                                       SplitInfo* output) {
    double gain_shift = GetLeafGain<true, true>(
        sum_gradient, sum_hessian, meta_->config->lambda_l1,
        meta_->config->lambda_l2, meta_->config->max_delta_step);
    double min_gain_shift = gain_shift + meta_->config->min_gain_to_split;

    // Accumulate the right-child statistics over bins >= threshold.
    const int8_t offset = meta_->offset;

    double sum_right_gradient = 0.0f;
    double sum_right_hessian = kEpsilon;
    data_size_t right_count = 0;

    // Decide how missing values affect the scan.
    bool use_na_as_missing = false;
    bool skip_default_bin = false;
    if (meta_->missing_type == MissingType::Zero) {
      skip_default_bin = true;
    } else if (meta_->missing_type == MissingType::NaN) {
      use_na_as_missing = true;
    }

    int t = meta_->num_bin - 1 - offset - use_na_as_missing;
    const int t_end = 1 - offset;
    // Approximate per-bin data counts from hessians.
    const double cnt_factor = num_data / sum_hessian;
    // from right to left, and we don't need data in bin0
    for (; t >= t_end; --t) {
      // Stop once we pass below the forced threshold.
      if (static_cast<uint32_t>(t + offset) < threshold) {
        break;
      }
      // need to skip default bin
      if (skip_default_bin &&
          (t + offset) == static_cast<int>(meta_->default_bin)) {
        continue;
      }
      const auto grad = GET_GRAD(data_, t);
      const auto hess = GET_HESS(data_, t);
      data_size_t cnt =
          static_cast<data_size_t>(Common::RoundInt(hess * cnt_factor));
      sum_right_gradient += grad;
      sum_right_hessian += hess;
      right_count += cnt;
    }
    // Left child = total - right child.
    double sum_left_gradient = sum_gradient - sum_right_gradient;
    double sum_left_hessian = sum_hessian - sum_right_hessian;
    data_size_t left_count = num_data - right_count;
    double current_gain =
        GetLeafGain<true, true>(
            sum_left_gradient, sum_left_hessian, meta_->config->lambda_l1,
            meta_->config->lambda_l2, meta_->config->max_delta_step) +
        GetLeafGain<true, true>(
            sum_right_gradient, sum_right_hessian, meta_->config->lambda_l1,
            meta_->config->lambda_l2, meta_->config->max_delta_step);

    // gain with split is worse than without split
    if (std::isnan(current_gain) || current_gain <= min_gain_shift) {
      output->gain = kMinScore;
      Log::Warning(
          "'Forced Split' will be ignored since the gain getting worse.");
      return;
    }

    // update split information
    output->threshold = threshold;
    output->left_output = CalculateSplittedLeafOutput<true, true>(
        sum_left_gradient, sum_left_hessian, meta_->config->lambda_l1,
        meta_->config->lambda_l2, meta_->config->max_delta_step);
    output->left_count = left_count;
    output->left_sum_gradient = sum_left_gradient;
    output->left_sum_hessian = sum_left_hessian - kEpsilon;
    output->right_output = CalculateSplittedLeafOutput<true, true>(
        sum_gradient - sum_left_gradient, sum_hessian - sum_left_hessian,
        meta_->config->lambda_l1, meta_->config->lambda_l2,
        meta_->config->max_delta_step);
    output->right_count = num_data - left_count;
    output->right_sum_gradient = sum_gradient - sum_left_gradient;
    output->right_sum_hessian = sum_hessian - sum_left_hessian - kEpsilon;
    output->gain = current_gain - min_gain_shift;
    output->default_left = true;
  }

590
591
592
593
  /*!
   * \brief Compute the SplitInfo for a forced one-hot categorical split:
   *        the given category goes left, everything else goes right.
   */
  void GatherInfoForThresholdCategorical(double sum_gradient,
                                         double sum_hessian, uint32_t threshold,
                                         data_size_t num_data,
                                         SplitInfo* output) {
    // get SplitInfo for a given one-hot categorical split.
    output->default_left = false;
    double gain_shift = GetLeafGain<true, true>(
        sum_gradient, sum_hessian, meta_->config->lambda_l1,
        meta_->config->lambda_l2, meta_->config->max_delta_step);
    double min_gain_shift = gain_shift + meta_->config->min_gain_to_split;
    bool is_full_categorical = meta_->missing_type == MissingType::None;
    // With missing values, the last bin is reserved; only real bins count.
    int used_bin = meta_->num_bin - 1 + is_full_categorical;
    if (threshold >= static_cast<uint32_t>(used_bin)) {
      output->gain = kMinScore;
      Log::Warning("Invalid categorical threshold split");
      return;
    }
    // Approximate the category's data count from its hessian.
    const double cnt_factor = num_data / sum_hessian;
    const auto grad = GET_GRAD(data_, threshold);
    const auto hess = GET_HESS(data_, threshold);
    data_size_t cnt =
        static_cast<data_size_t>(Common::RoundInt(hess * cnt_factor));

    double l2 = meta_->config->lambda_l2;
    data_size_t left_count = cnt;
    data_size_t right_count = num_data - left_count;
    double sum_left_hessian = hess + kEpsilon;
    double sum_right_hessian = sum_hessian - sum_left_hessian;
    double sum_left_gradient = grad;
    double sum_right_gradient = sum_gradient - sum_left_gradient;
    // current split gain
    double current_gain =
        GetLeafGain<true, true>(sum_right_gradient, sum_right_hessian,
                                meta_->config->lambda_l1, l2,
                                meta_->config->max_delta_step) +
        GetLeafGain<true, true>(sum_left_gradient, sum_left_hessian,
                                meta_->config->lambda_l1, l2,
                                meta_->config->max_delta_step);
    // Reject the forced split when it does not beat the no-split gain.
    if (std::isnan(current_gain) || current_gain <= min_gain_shift) {
      output->gain = kMinScore;
      Log::Warning(
          "'Forced Split' will be ignored since the gain getting worse.");
      return;
    }

    output->left_output = CalculateSplittedLeafOutput<true, true>(
        sum_left_gradient, sum_left_hessian, meta_->config->lambda_l1, l2,
        meta_->config->max_delta_step);
    output->left_count = left_count;
    output->left_sum_gradient = sum_left_gradient;
    output->left_sum_hessian = sum_left_hessian - kEpsilon;
    output->right_output = CalculateSplittedLeafOutput<true, true>(
        sum_right_gradient, sum_right_hessian, meta_->config->lambda_l1, l2,
        meta_->config->max_delta_step);
    output->right_count = right_count;
    output->right_sum_gradient = sum_gradient - sum_left_gradient;
    output->right_sum_hessian = sum_right_hessian - kEpsilon;
    output->gain = current_gain - min_gain_shift;
    output->num_cat_threshold = 1;
    output->cat_threshold = std::vector<uint32_t>(1, threshold);
  }

Guolin Ke's avatar
Guolin Ke committed
652
  /*!
   * \brief Binary size of this histogram
   */
  // NOTE: "Histgram" spelling is kept — renaming would break callers.
  int SizeOfHistgram() const {
    // Only bins at or after meta_->offset are stored.
    return (meta_->num_bin - meta_->offset) * kHistEntrySize;
  }

  /*!
   * \brief Restore histogram from memory
   * \param memory_data source buffer; must hold at least SizeOfHistgram() bytes
   */
  void FromMemory(char* memory_data) {
    std::memcpy(data_, memory_data,
                (meta_->num_bin - meta_->offset) * kHistEntrySize);
  }

  /*!
668
669
   * \brief True if this histogram can be splitted
   */
Guolin Ke's avatar
Guolin Ke committed
670
671
672
  bool is_splittable() { return is_splittable_; }

  /*!
673
674
   * \brief Set splittable to this histogram
   */
Guolin Ke's avatar
Guolin Ke committed
675
676
  void set_is_splittable(bool val) { is_splittable_ = val; }

677
678
679
680
681
  /*! \brief Soft-thresholding operator used for L1 regularization:
   *         shrinks |s| by l1 and clips at zero, preserving the sign. */
  static double ThresholdL1(double s, double l1) {
    const double abs_s = std::fabs(s);
    if (abs_s <= l1) {
      return 0.0;
    }
    return Common::Sign(s) * (abs_s - l1);
  }

682
683
684
685
686
687
688
689
690
691
692
  /*!
   * \brief Optimal leaf output -g / (h + l2), with optional L1
   *        soft-thresholding of the gradient and optional clipping of the
   *        result at max_delta_step.
   */
  template <bool USE_L1, bool USE_MAX_OUTPUT>
  static double CalculateSplittedLeafOutput(double sum_gradients,
                                            double sum_hessians, double l1,
                                            double l2, double max_delta_step) {
    // Apply L1 shrinkage to the gradient when enabled (compile-time branch).
    const double numerator =
        USE_L1 ? ThresholdL1(sum_gradients, l1) : sum_gradients;
    const double ret = -numerator / (sum_hessians + l2);
    if (USE_MAX_OUTPUT) {
      if (max_delta_step > 0 && std::fabs(ret) > max_delta_step) {
        return Common::Sign(ret) * max_delta_step;
      }
    }
    return ret;
  }

705
706
707
708
709
710
711
712
713
714
715
716
717
718
  /*!
   * \brief Leaf output with optional monotone-constraint clamping.
   * \param constraints Lower/upper bounds imposed by monotone constraints
   *        (only consulted when USE_MC)
   * \return The unconstrained leaf output, clamped into
   *         [constraints.min, constraints.max] when USE_MC
   */
  template <bool USE_MC, bool USE_L1, bool USE_MAX_OUTPUT>
  static double CalculateSplittedLeafOutput(
      double sum_gradients, double sum_hessians, double l1, double l2,
      double max_delta_step, const ConstraintEntry& constraints) {
    const double raw_output =
        CalculateSplittedLeafOutput<USE_L1, USE_MAX_OUTPUT>(
            sum_gradients, sum_hessians, l1, l2, max_delta_step);
    if (!USE_MC) {
      return raw_output;
    }
    // Check the lower bound first to keep the original precedence.
    if (raw_output < constraints.min) {
      return constraints.min;
    }
    return raw_output > constraints.max ? constraints.max : raw_output;
  }

721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
 private:
  /*!
   * \brief Total gain of a candidate split: the sum of both children's leaf
   *        gains, rejecting monotone-constraint violations when USE_MC.
   * \return Combined gain of the two children; 0 when the children's outputs
   *         are ordered against the monotone constraint
   */
  template <bool USE_MC, bool USE_L1, bool USE_MAX_OUTPUT>
  static double GetSplitGains(double sum_left_gradients,
                              double sum_left_hessians,
                              double sum_right_gradients,
                              double sum_right_hessians, double l1, double l2,
                              double max_delta_step,
                              const ConstraintEntry& constraints,
                              int8_t monotone_constraint) {
    if (USE_MC) {
      const double left_output =
          CalculateSplittedLeafOutput<USE_MC, USE_L1, USE_MAX_OUTPUT>(
              sum_left_gradients, sum_left_hessians, l1, l2, max_delta_step,
              constraints);
      const double right_output =
          CalculateSplittedLeafOutput<USE_MC, USE_L1, USE_MAX_OUTPUT>(
              sum_right_gradients, sum_right_hessians, l1, l2, max_delta_step,
              constraints);
      // A split whose child outputs are ordered against the constraint
      // direction contributes no gain at all.
      const bool violates_constraint =
          (monotone_constraint > 0 && left_output > right_output) ||
          (monotone_constraint < 0 && left_output < right_output);
      if (violates_constraint) {
        return 0;
      }
      // Evaluate the gain at the (possibly clamped) outputs.
      return GetLeafGainGivenOutput<USE_L1>(sum_left_gradients,
                                            sum_left_hessians, l1, l2,
                                            left_output) +
             GetLeafGainGivenOutput<USE_L1>(sum_right_gradients,
                                            sum_right_hessians, l1, l2,
                                            right_output);
    }
    // Unconstrained case: closed-form leaf gains.
    return GetLeafGain<USE_L1, USE_MAX_OUTPUT>(sum_left_gradients,
                                               sum_left_hessians, l1, l2,
                                               max_delta_step) +
           GetLeafGain<USE_L1, USE_MAX_OUTPUT>(sum_right_gradients,
                                               sum_right_hessians, l1, l2,
                                               max_delta_step);
  }
Guolin Ke's avatar
Guolin Ke committed
756

757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
  /*!
   * \brief Gain contribution of one leaf given its gradient statistics.
   * \return g^2 / (H + l2) in the closed-form case; otherwise the objective
   *         value evaluated at the clamped leaf output
   */
  template <bool USE_L1, bool USE_MAX_OUTPUT>
  static double GetLeafGain(double sum_gradients, double sum_hessians,
                            double l1, double l2, double max_delta_step) {
    if (USE_MAX_OUTPUT) {
      // With a capped leaf output the closed form no longer applies;
      // evaluate the objective at the actual (possibly clamped) output.
      const double output = CalculateSplittedLeafOutput<USE_L1, USE_MAX_OUTPUT>(
          sum_gradients, sum_hessians, l1, l2, max_delta_step);
      return GetLeafGainGivenOutput<USE_L1>(sum_gradients, sum_hessians, l1, l2,
                                            output);
    }
    // Closed form: g^2 / (H + l2), with g optionally L1-shrunk.
    const double g = USE_L1 ? ThresholdL1(sum_gradients, l1) : sum_gradients;
    return (g * g) / (sum_hessians + l2);
  }

775
776
777
778
779
780
781
782
783
784
785
  /*!
   * \brief Gain of a leaf whose output value is already fixed.
   * \param output The leaf's output value
   * \return Negated regularized objective at `output`:
   *         -(2 * g * w + (H + l2) * w^2), with g optionally L1-shrunk
   */
  template <bool USE_L1>
  static double GetLeafGainGivenOutput(double sum_gradients,
                                       double sum_hessians, double l1,
                                       double l2, double output) {
    const double effective_gradient =
        USE_L1 ? ThresholdL1(sum_gradients, l1) : sum_gradients;
    return -(2.0 * effective_gradient * output +
             (sum_hessians + l2) * output * output);
  }
Guolin Ke's avatar
Guolin Ke committed
787

788
789
  /*!
   * \brief Sequentially scan the histogram bins of a numerical feature to
   *        find the best split threshold, updating `output` in place when a
   *        better split than the current one is found.
   *
   * Template flags select the scan variant at compile time:
   *   USE_RAND         - extremely randomized trees: evaluate only the single
   *                      pre-drawn threshold `rand_threshold`
   *   USE_MC           - apply monotone constraints
   *   USE_L1           - apply L1 regularization
   *   USE_MAX_OUTPUT   - cap leaf outputs by max_delta_step
   *   REVERSE          - scan right-to-left (accumulating the right child)
   *   SKIP_DEFAULT_BIN - skip the feature's default bin during the scan
   *   NA_AS_MISSING    - treat NA as missing values during the scan
   *
   * \param sum_gradient Total gradient sum of the current leaf
   * \param sum_hessian Total hessian sum of the current leaf
   * \param num_data Number of data points in the current leaf
   * \param constraints Monotone-constraint bounds for leaf outputs
   * \param min_gain_shift Gain threshold a split has to beat
   * \param output Best split found so far; overwritten when improved
   * \param rand_threshold Pre-drawn bin index (only used when USE_RAND)
   */
  template <bool USE_RAND, bool USE_MC, bool USE_L1, bool USE_MAX_OUTPUT,
            bool REVERSE, bool SKIP_DEFAULT_BIN, bool NA_AS_MISSING>
  void FindBestThresholdSequentially(double sum_gradient, double sum_hessian,
                                     data_size_t num_data,
                                     const ConstraintEntry& constraints,
                                     double min_gain_shift, SplitInfo* output,
                                     int rand_threshold) {
    const int8_t offset = meta_->offset;
    double best_sum_left_gradient = NAN;
    double best_sum_left_hessian = NAN;
    double best_gain = kMinScore;
    data_size_t best_left_count = 0;
    uint32_t best_threshold = static_cast<uint32_t>(meta_->num_bin);
    // Per-bin data counts are reconstructed from hessian sums:
    // cnt ~= hess * (num_data / sum_hessian).
    const double cnt_factor = num_data / sum_hessian;
    if (REVERSE) {
      // Accumulate the right child from the last bin backwards; left-child
      // statistics are obtained by subtracting from the leaf totals.
      double sum_right_gradient = 0.0f;
      double sum_right_hessian = kEpsilon;
      data_size_t right_count = 0;

      int t = meta_->num_bin - 1 - offset - NA_AS_MISSING;
      const int t_end = 1 - offset;

      // from right to left, and we don't need data in bin0
      for (; t >= t_end; --t) {
        // need to skip default bin
        if (SKIP_DEFAULT_BIN) {
          if ((t + offset) == static_cast<int>(meta_->default_bin)) {
            continue;
          }
        }
        const auto grad = GET_GRAD(data_, t);
        const auto hess = GET_HESS(data_, t);
        data_size_t cnt =
            static_cast<data_size_t>(Common::RoundInt(hess * cnt_factor));
        sum_right_gradient += grad;
        sum_right_hessian += hess;
        right_count += cnt;
        // if data not enough, or sum hessian too small
        if (right_count < meta_->config->min_data_in_leaf ||
            sum_right_hessian < meta_->config->min_sum_hessian_in_leaf) {
          continue;
        }
        data_size_t left_count = num_data - right_count;
        // if data not enough
        if (left_count < meta_->config->min_data_in_leaf) {
          break;
        }

        double sum_left_hessian = sum_hessian - sum_right_hessian;
        // if sum hessian too small
        if (sum_left_hessian < meta_->config->min_sum_hessian_in_leaf) {
          break;
        }

        double sum_left_gradient = sum_gradient - sum_right_gradient;
        if (USE_RAND) {
          // extra-trees mode: only the pre-drawn threshold is evaluated
          if (t - 1 + offset != rand_threshold) {
            continue;
          }
        }
        // current split gain
        double current_gain = GetSplitGains<USE_MC, USE_L1, USE_MAX_OUTPUT>(
            sum_left_gradient, sum_left_hessian, sum_right_gradient,
            sum_right_hessian, meta_->config->lambda_l1,
            meta_->config->lambda_l2, meta_->config->max_delta_step,
            constraints, meta_->monotone_type);
        // gain with split is worse than without split
        if (current_gain <= min_gain_shift) {
          continue;
        }

        // mark to is splittable
        is_splittable_ = true;
        // better split point
        if (current_gain > best_gain) {
          best_left_count = left_count;
          best_sum_left_gradient = sum_left_gradient;
          best_sum_left_hessian = sum_left_hessian;
          // left is <= threshold, right is > threshold.  so this is t-1
          best_threshold = static_cast<uint32_t>(t - 1 + offset);
          best_gain = current_gain;
        }
      }
    } else {
      // Forward scan: accumulate the left child from the first bin onwards.
      double sum_left_gradient = 0.0f;
      double sum_left_hessian = kEpsilon;
      data_size_t left_count = 0;

      int t = 0;
      const int t_end = meta_->num_bin - 2 - offset;

      if (NA_AS_MISSING) {
        if (offset == 1) {
          // The missing-value bin is not stored (offset == 1); recover its
          // statistics by subtracting all stored bins from the leaf totals,
          // then start at t = -1 so missing values land in the left child.
          sum_left_gradient = sum_gradient;
          sum_left_hessian = sum_hessian - kEpsilon;
          left_count = num_data;
          for (int i = 0; i < meta_->num_bin - offset; ++i) {
            const auto grad = GET_GRAD(data_, i);
            const auto hess = GET_HESS(data_, i);
            data_size_t cnt =
                static_cast<data_size_t>(Common::RoundInt(hess * cnt_factor));
            sum_left_gradient -= grad;
            sum_left_hessian -= hess;
            left_count -= cnt;
          }
          t = -1;
        }
      }

      for (; t <= t_end; ++t) {
        // need to skip default bin
        if (SKIP_DEFAULT_BIN) {
          if ((t + offset) == static_cast<int>(meta_->default_bin)) {
            continue;
          }
        }
        // t == -1 means "only the recovered missing bin so far"; no stored
        // bin to add in that case
        if (t >= 0) {
          sum_left_gradient += GET_GRAD(data_, t);
          sum_left_hessian += GET_HESS(data_, t);
          left_count += static_cast<data_size_t>(
              Common::RoundInt(GET_HESS(data_, t) * cnt_factor));
        }
        // if data not enough, or sum hessian too small
        if (left_count < meta_->config->min_data_in_leaf ||
            sum_left_hessian < meta_->config->min_sum_hessian_in_leaf) {
          continue;
        }
        data_size_t right_count = num_data - left_count;
        // if data not enough
        if (right_count < meta_->config->min_data_in_leaf) {
          break;
        }

        double sum_right_hessian = sum_hessian - sum_left_hessian;
        // if sum hessian too small
        if (sum_right_hessian < meta_->config->min_sum_hessian_in_leaf) {
          break;
        }

        double sum_right_gradient = sum_gradient - sum_left_gradient;
        if (USE_RAND) {
          // extra-trees mode: only the pre-drawn threshold is evaluated
          if (t + offset != rand_threshold) {
            continue;
          }
        }
        // current split gain
        double current_gain = GetSplitGains<USE_MC, USE_L1, USE_MAX_OUTPUT>(
            sum_left_gradient, sum_left_hessian, sum_right_gradient,
            sum_right_hessian, meta_->config->lambda_l1,
            meta_->config->lambda_l2, meta_->config->max_delta_step,
            constraints, meta_->monotone_type);
        // gain with split is worse than without split
        if (current_gain <= min_gain_shift) {
          continue;
        }

        // mark to is splittable
        is_splittable_ = true;
        // better split point
        if (current_gain > best_gain) {
          best_left_count = left_count;
          best_sum_left_gradient = sum_left_gradient;
          best_sum_left_hessian = sum_left_hessian;
          best_threshold = static_cast<uint32_t>(t + offset);
          best_gain = current_gain;
        }
      }
    }

    if (is_splittable_ && best_gain > output->gain + min_gain_shift) {
      // update split information
      output->threshold = best_threshold;
      output->left_output =
          CalculateSplittedLeafOutput<USE_MC, USE_L1, USE_MAX_OUTPUT>(
              best_sum_left_gradient, best_sum_left_hessian,
              meta_->config->lambda_l1, meta_->config->lambda_l2,
              meta_->config->max_delta_step, constraints);
      output->left_count = best_left_count;
      output->left_sum_gradient = best_sum_left_gradient;
      // kEpsilon was seeded into the hessian accumulator; remove it again
      output->left_sum_hessian = best_sum_left_hessian - kEpsilon;
      output->right_output =
          CalculateSplittedLeafOutput<USE_MC, USE_L1, USE_MAX_OUTPUT>(
              sum_gradient - best_sum_left_gradient,
              sum_hessian - best_sum_left_hessian, meta_->config->lambda_l1,
              meta_->config->lambda_l2, meta_->config->max_delta_step,
              constraints);
      output->right_count = num_data - best_left_count;
      output->right_sum_gradient = sum_gradient - best_sum_left_gradient;
      output->right_sum_hessian =
          sum_hessian - best_sum_left_hessian - kEpsilon;
      output->gain = best_gain - min_gain_shift;
      // NOTE(review): default direction is tied to the scan direction
      // (REVERSE) — missing/default values follow the implicitly
      // accumulated side
      output->default_left = REVERSE;
    }
  }

Guolin Ke's avatar
Guolin Ke committed
982
  const FeatureMetainfo* meta_;
Guolin Ke's avatar
Guolin Ke committed
983
  /*! \brief sum of gradient of each bin */
984
  hist_t* data_;
Guolin Ke's avatar
Guolin Ke committed
985
  bool is_splittable_ = true;
986

987
988
989
  std::function<void(double, double, data_size_t, const ConstraintEntry&,
                     SplitInfo*)>
      find_best_threshold_fun_;
Guolin Ke's avatar
Guolin Ke committed
990
};
Nikita Titov's avatar
Nikita Titov committed
991

Guolin Ke's avatar
Guolin Ke committed
992
class HistogramPool {
993
 public:
Guolin Ke's avatar
Guolin Ke committed
994
  /*!
995
996
   * \brief Constructor
   */
Guolin Ke's avatar
Guolin Ke committed
997
  HistogramPool() {
Guolin Ke's avatar
Guolin Ke committed
998
999
    cache_size_ = 0;
    total_size_ = 0;
Guolin Ke's avatar
Guolin Ke committed
1000
  }
1001

Guolin Ke's avatar
Guolin Ke committed
1002
  /*!
1003
1004
1005
   * \brief Destructor
   */
  ~HistogramPool() {}
1006

Guolin Ke's avatar
Guolin Ke committed
1007
  /*!
1008
1009
1010
1011
   * \brief Reset pool size
   * \param cache_size Max cache size
   * \param total_size Total size will be used
   */
Guolin Ke's avatar
Guolin Ke committed
1012
  void Reset(int cache_size, int total_size) {
Guolin Ke's avatar
Guolin Ke committed
1013
1014
    cache_size_ = cache_size;
    // at least need 2 bucket to store smaller leaf and larger leaf
1015
    CHECK_GE(cache_size_, 2);
Guolin Ke's avatar
Guolin Ke committed
1016
1017
1018
1019
1020
1021
    total_size_ = total_size;
    if (cache_size_ > total_size_) {
      cache_size_ = total_size_;
    }
    is_enough_ = (cache_size_ == total_size_);
    if (!is_enough_) {
1022
1023
1024
      mapper_.resize(total_size_);
      inverse_mapper_.resize(cache_size_);
      last_used_time_.resize(cache_size_);
Guolin Ke's avatar
Guolin Ke committed
1025
1026
1027
      ResetMap();
    }
  }
1028

Guolin Ke's avatar
Guolin Ke committed
1029
  /*!
1030
1031
   * \brief Reset mapper
   */
Guolin Ke's avatar
Guolin Ke committed
1032
1033
1034
1035
1036
1037
1038
1039
  void ResetMap() {
    if (!is_enough_) {
      cur_time_ = 0;
      std::fill(mapper_.begin(), mapper_.end(), -1);
      std::fill(inverse_mapper_.begin(), inverse_mapper_.end(), -1);
      std::fill(last_used_time_.begin(), last_used_time_.end(), 0);
    }
  }
1040
1041
1042
  template <bool USE_DATA, bool USE_CONFIG>
  static void SetFeatureInfo(const Dataset* train_data, const Config* config,
                             std::vector<FeatureMetainfo>* feature_meta) {
1043
1044
1045
    auto& ref_feature_meta = *feature_meta;
    const int num_feature = train_data->num_features();
    ref_feature_meta.resize(num_feature);
1046
#pragma omp parallel for schedule(static, 512) if (num_feature >= 1024)
1047
    for (int i = 0; i < num_feature; ++i) {
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
      if (USE_DATA) {
        ref_feature_meta[i].num_bin = train_data->FeatureNumBin(i);
        ref_feature_meta[i].default_bin =
            train_data->FeatureBinMapper(i)->GetDefaultBin();
        ref_feature_meta[i].missing_type =
            train_data->FeatureBinMapper(i)->missing_type();
        if (train_data->FeatureBinMapper(i)->GetMostFreqBin() == 0) {
          ref_feature_meta[i].offset = 1;
        } else {
          ref_feature_meta[i].offset = 0;
        }
        ref_feature_meta[i].bin_type =
            train_data->FeatureBinMapper(i)->bin_type();
1061
      }
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
      if (USE_CONFIG) {
        const int real_fidx = train_data->RealFeatureIndex(i);
        if (!config->monotone_constraints.empty()) {
          ref_feature_meta[i].monotone_type =
              config->monotone_constraints[real_fidx];
        } else {
          ref_feature_meta[i].monotone_type = 0;
        }
        if (!config->feature_contri.empty()) {
          ref_feature_meta[i].penalty = config->feature_contri[real_fidx];
        } else {
          ref_feature_meta[i].penalty = 1.0;
        }
        ref_feature_meta[i].rand = Random(config->extra_seed + i);
1076
1077
1078
1079
1080
      }
      ref_feature_meta[i].config = config;
    }
  }

1081
1082
  void DynamicChangeSize(const Dataset* train_data, bool is_hist_colwise,
                         const Config* config, int cache_size, int total_size) {
Guolin Ke's avatar
Guolin Ke committed
1083
    if (feature_metas_.empty()) {
1084
      SetFeatureInfo<true, true>(train_data, config, &feature_metas_);
1085
      uint64_t bin_cnt_over_features = 0;
1086
      for (int i = 0; i < train_data->num_features(); ++i) {
1087
1088
        bin_cnt_over_features +=
            static_cast<uint64_t>(feature_metas_[i].num_bin);
Guolin Ke's avatar
Guolin Ke committed
1089
      }
1090
      Log::Info("Total Bins %d", bin_cnt_over_features);
Guolin Ke's avatar
Guolin Ke committed
1091
    }
Guolin Ke's avatar
Guolin Ke committed
1092
    int old_cache_size = static_cast<int>(pool_.size());
Guolin Ke's avatar
Guolin Ke committed
1093
    Reset(cache_size, total_size);
Guolin Ke's avatar
Guolin Ke committed
1094
1095
1096
1097
1098

    if (cache_size > old_cache_size) {
      pool_.resize(cache_size);
      data_.resize(cache_size);
    }
1099
    int num_total_bin = static_cast<int>(train_data->NumTotalBin());
Guolin Ke's avatar
Guolin Ke committed
1100

1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
    std::vector<int> offsets;
    if (is_hist_colwise) {
      int offset = 0;
      for (int j = 0; j < train_data->num_features(); ++j) {
        offset += train_data->SubFeatureBinOffset(j);
        offsets.push_back(offset);
        auto num_bin = train_data->FeatureNumBin(j);
        if (train_data->FeatureBinMapper(j)->GetMostFreqBin() == 0) {
          num_bin -= 1;
        }
        offset += num_bin;
      }
    } else {
      num_total_bin = 1;
      for (int j = 0; j < train_data->num_features(); ++j) {
        offsets.push_back(num_total_bin);
        num_total_bin += train_data->FeatureBinMapper(j)->num_bin();
        if (train_data->FeatureBinMapper(j)->GetMostFreqBin() == 0) {
          num_total_bin -= 1;
        }
      }
    }
1123
    OMP_INIT_EX();
1124
#pragma omp parallel for schedule(static)
Guolin Ke's avatar
Guolin Ke committed
1125
    for (int i = old_cache_size; i < cache_size; ++i) {
1126
      OMP_LOOP_EX_BEGIN();
Guolin Ke's avatar
Guolin Ke committed
1127
      pool_[i].reset(new FeatureHistogram[train_data->num_features()]);
1128
      data_[i].resize(num_total_bin * 2);
Guolin Ke's avatar
Guolin Ke committed
1129
      for (int j = 0; j < train_data->num_features(); ++j) {
1130
        pool_[i][j].Init(data_[i].data() + offsets[j] * 2, &feature_metas_[j]);
Guolin Ke's avatar
Guolin Ke committed
1131
      }
1132
      OMP_LOOP_EX_END();
Guolin Ke's avatar
Guolin Ke committed
1133
    }
1134
    OMP_THROW_EX();
Guolin Ke's avatar
Guolin Ke committed
1135
1136
  }

1137
  void ResetConfig(const Dataset* train_data, const Config* config) {
1138
1139
    CHECK_GT(train_data->num_features(), 0);
    const Config* old_config = feature_metas_[0].config;
1140
    SetFeatureInfo<false, true>(train_data, config, &feature_metas_);
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
    // if need to reset the function pointers
    if (old_config->lambda_l1 != config->lambda_l1 ||
        old_config->monotone_constraints != config->monotone_constraints ||
        old_config->extra_trees != config->extra_trees ||
        old_config->max_delta_step != config->max_delta_step) {
#pragma omp parallel for schedule(static)
      for (int i = 0; i < cache_size_; ++i) {
        for (int j = 0; j < train_data->num_features(); ++j) {
          pool_[i][j].ResetFunc();
        }
      }
    }
Guolin Ke's avatar
Guolin Ke committed
1153
  }
1154

Guolin Ke's avatar
Guolin Ke committed
1155
  /*!
1156
1157
1158
1159
1160
1161
   * \brief Get data for the specific index
   * \param idx which index want to get
   * \param out output data will store into this
   * \return True if this index is in the pool, False if this index is not in
   * the pool
   */
Guolin Ke's avatar
Guolin Ke committed
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
  bool Get(int idx, FeatureHistogram** out) {
    if (is_enough_) {
      *out = pool_[idx].get();
      return true;
    } else if (mapper_[idx] >= 0) {
      int slot = mapper_[idx];
      *out = pool_[slot].get();
      last_used_time_[slot] = ++cur_time_;
      return true;
    } else {
1172
      // choose the least used slot
Guolin Ke's avatar
Guolin Ke committed
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
      int slot = static_cast<int>(ArrayArgs<int>::ArgMin(last_used_time_));
      *out = pool_[slot].get();
      last_used_time_[slot] = ++cur_time_;

      // reset previous mapper
      if (inverse_mapper_[slot] >= 0) mapper_[inverse_mapper_[slot]] = -1;

      // update current mapper
      mapper_[idx] = slot;
      inverse_mapper_[slot] = idx;
      return false;
    }
  }

  /*!
1188
1189
1190
1191
   * \brief Move data from one index to another index
   * \param src_idx
   * \param dst_idx
   */
Guolin Ke's avatar
Guolin Ke committed
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
  void Move(int src_idx, int dst_idx) {
    if (is_enough_) {
      std::swap(pool_[src_idx], pool_[dst_idx]);
      return;
    }
    if (mapper_[src_idx] < 0) {
      return;
    }
    // get slot of src idx
    int slot = mapper_[src_idx];
    // reset src_idx
    mapper_[src_idx] = -1;

    // move to dst idx
    mapper_[dst_idx] = slot;
    last_used_time_[slot] = ++cur_time_;
    inverse_mapper_[slot] = dst_idx;
  }
1210

1211
 private:
Guolin Ke's avatar
Guolin Ke committed
1212
  std::vector<std::unique_ptr<FeatureHistogram[]>> pool_;
1213
1214
1215
  std::vector<
      std::vector<hist_t, Common::AlignmentAllocator<hist_t, kAlignedSize>>>
      data_;
Guolin Ke's avatar
Guolin Ke committed
1216
  std::vector<FeatureMetainfo> feature_metas_;
Guolin Ke's avatar
Guolin Ke committed
1217
1218
1219
1220
1221
1222
1223
1224
1225
  int cache_size_;
  int total_size_;
  bool is_enough_ = false;
  std::vector<int> mapper_;
  std::vector<int> inverse_mapper_;
  std::vector<int> last_used_time_;
  int cur_time_ = 0;
};

Guolin Ke's avatar
Guolin Ke committed
1226
}  // namespace LightGBM
1227
#endif  // LightGBM_TREELEARNER_FEATURE_HISTOGRAM_HPP_