"src/vscode:/vscode.git/clone" did not exist on "203df1b4e1845ff0844012bf9dcabdf689d18ef6"
feature_histogram.hpp 37.2 KB
Newer Older
1
2
3
4
/*!
 * Copyright (c) 2016 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for license information.
 */
Guolin Ke's avatar
Guolin Ke committed
5
6
7
#ifndef LIGHTGBM_TREELEARNER_FEATURE_HISTOGRAM_HPP_
#define LIGHTGBM_TREELEARNER_FEATURE_HISTOGRAM_HPP_

8
#include <LightGBM/bin.h>
Guolin Ke's avatar
Guolin Ke committed
9
#include <LightGBM/dataset.h>
10
#include <LightGBM/utils/array_args.h>
Guolin Ke's avatar
Guolin Ke committed
11

12
#include <algorithm>
13
#include <cmath>
14
15
16
17
18
#include <cstring>
#include <memory>
#include <utility>
#include <vector>

19
#include "monotone_constraints.hpp"
Nikita Titov's avatar
Nikita Titov committed
20
#include "split_info.hpp"
Guolin Ke's avatar
Guolin Ke committed
21

22
namespace LightGBM {
Guolin Ke's avatar
Guolin Ke committed
23

Guolin Ke's avatar
Guolin Ke committed
24
class FeatureMetainfo {
 public:
  /*! \brief Number of bins in this feature's histogram */
  int num_bin;
  /*! \brief How missing values are encoded for this feature (None/Zero/NaN) */
  MissingType missing_type;
  /*! \brief Offset between stored histogram index and bin id;
   *  (num_bin - offset) entries are actually stored */
  int8_t offset = 0;
  /*! \brief Bin id of the feature's default value; skipped when zero is treated as missing */
  uint32_t default_bin;
  /*! \brief Monotone constraint direction (0: none); copied into SplitInfo for numerical splits */
  int8_t monotone_type = 0;
  /*! \brief Multiplier applied to this feature's split gain */
  double penalty = 1.0;
  /*! \brief pointer of tree config */
  const Config* config;
  /*! \brief Numerical or categorical bin type; selects the threshold-finding routine */
  BinType bin_type;
};
Guolin Ke's avatar
Guolin Ke committed
36
37
38
39
/*!
* \brief FeatureHistogram is used to construct and store a histogram for a feature.
*/
class FeatureHistogram {
40
 public:
Guolin Ke's avatar
Guolin Ke committed
41
  FeatureHistogram() {
Guolin Ke's avatar
Guolin Ke committed
42
    data_ = nullptr;
Guolin Ke's avatar
Guolin Ke committed
43
  }
Guolin Ke's avatar
Guolin Ke committed
44

Guolin Ke's avatar
Guolin Ke committed
45
46
47
  /*! \brief Trivial destructor: data_ is a non-owning pointer (assigned in Init, never freed here). */
  ~FeatureHistogram() {
  }

  /*! \brief Disable copy */
  FeatureHistogram& operator=(const FeatureHistogram&) = delete;
  /*! \brief Disable copy */
  FeatureHistogram(const FeatureHistogram&) = delete;

Guolin Ke's avatar
Guolin Ke committed
53
54
55
56
57
  /*!
  * \brief Init the feature histogram
  * \param feature the feature data for this histogram
  * \param min_num_data_one_leaf minimal number of data in one leaf
  */
58
  /*!
  * \brief Init the feature histogram
  * \param data histogram buffer this object writes into (not owned)
  * \param meta shared, immutable per-feature metadata
  */
  void Init(hist_t* data, const FeatureMetainfo* meta) {
    meta_ = meta;
    data_ = data;
    // Use lambdas instead of std::bind: clearer, inlinable, and no placeholder noise
    // (Effective Modern C++ Item 34: prefer lambdas to std::bind).
    if (meta_->bin_type == BinType::NumericalBin) {
      find_best_threshold_fun_ = [this](double sum_gradient, double sum_hessian, data_size_t num_data,
                                        const ConstraintEntry& constraints, SplitInfo* output) {
        FindBestThresholdNumerical(sum_gradient, sum_hessian, num_data, constraints, output);
      };
    } else {
      find_best_threshold_fun_ = [this](double sum_gradient, double sum_hessian, data_size_t num_data,
                                        const ConstraintEntry& constraints, SplitInfo* output) {
        FindBestThresholdCategorical(sum_gradient, sum_hessian, num_data, constraints, output);
      };
    }
    rand_ = Random(meta_->config->extra_seed);
  }

71
  /*! \brief Get the raw histogram buffer (not owned by this object). */
  hist_t* RawData() {
    return data_;
  }
  /*!
  * \brief Subtract current histograms with other
  * \param other The histogram that want to subtract
  */
  void Subtract(const FeatureHistogram& other) {
79
80
    for (int i = 0; i < (meta_->num_bin - meta_->offset) * 2; ++i) {
      data_[i] -= other.data_[i];
Guolin Ke's avatar
Guolin Ke committed
81
82
    }
  }
83

84
  void FindBestThreshold(double sum_gradient, double sum_hessian, data_size_t num_data,
85
    const ConstraintEntry& constraints, SplitInfo* output) {
Guolin Ke's avatar
Guolin Ke committed
86
    output->default_left = true;
Guolin Ke's avatar
Guolin Ke committed
87
    output->gain = kMinScore;
88
    find_best_threshold_fun_(sum_gradient, sum_hessian + 2 * kEpsilon, num_data, constraints, output);
Guolin Ke's avatar
Guolin Ke committed
89
    output->gain *= meta_->penalty;
90
91
  }

92
  void FindBestThresholdNumerical(double sum_gradient, double sum_hessian, data_size_t num_data,
93
    const ConstraintEntry& constraints, SplitInfo* output) {
Guolin Ke's avatar
Guolin Ke committed
94
    is_splittable_ = false;
Guolin Ke's avatar
Guolin Ke committed
95
    double gain_shift = GetLeafSplitGain(sum_gradient, sum_hessian,
96
      meta_->config->lambda_l1, meta_->config->lambda_l2, meta_->config->max_delta_step);
Guolin Ke's avatar
Guolin Ke committed
97
    double min_gain_shift = gain_shift + meta_->config->min_gain_to_split;
98
    int rand_threshold = 0;
99
    if (meta_->num_bin - 2 > 0) {
100
101
      rand_threshold = rand_.NextInt(0, meta_->num_bin - 2);
    }
102
    const bool is_rand = meta_->config->extra_trees;
Guolin Ke's avatar
Guolin Ke committed
103
104
    if (meta_->num_bin > 2 && meta_->missing_type != MissingType::None) {
      if (meta_->missing_type == MissingType::Zero) {
105
        if (is_rand) {
106
107
          FindBestThresholdSequence<true>(sum_gradient, sum_hessian, num_data, constraints, min_gain_shift, output, -1, true, false, rand_threshold);
          FindBestThresholdSequence<true>(sum_gradient, sum_hessian, num_data, constraints, min_gain_shift, output, 1, true, false, rand_threshold);
108
        } else {
109
110
          FindBestThresholdSequence<false>(sum_gradient, sum_hessian, num_data, constraints, min_gain_shift, output, -1, true, false, rand_threshold);
          FindBestThresholdSequence<false>(sum_gradient, sum_hessian, num_data, constraints, min_gain_shift, output, 1, true, false, rand_threshold);
111
        }
Guolin Ke's avatar
Guolin Ke committed
112
      } else {
113
        if (is_rand) {
114
115
          FindBestThresholdSequence<true>(sum_gradient, sum_hessian, num_data, constraints, min_gain_shift, output, -1, false, true, rand_threshold);
          FindBestThresholdSequence<true>(sum_gradient, sum_hessian, num_data, constraints, min_gain_shift, output, 1, false, true, rand_threshold);
116
        } else {
117
118
          FindBestThresholdSequence<false>(sum_gradient, sum_hessian, num_data, constraints, min_gain_shift, output, -1, false, true, rand_threshold);
          FindBestThresholdSequence<false>(sum_gradient, sum_hessian, num_data, constraints, min_gain_shift, output, 1, false, true, rand_threshold);
119
        }
Guolin Ke's avatar
Guolin Ke committed
120
      }
121
    } else {
122
      if (is_rand) {
123
        FindBestThresholdSequence<true>(sum_gradient, sum_hessian, num_data, constraints, min_gain_shift, output, -1, false, false, rand_threshold);
124
      } else {
125
        FindBestThresholdSequence<false>(sum_gradient, sum_hessian, num_data, constraints, min_gain_shift, output, -1, false, false, rand_threshold);
126
      }
Guolin Ke's avatar
Guolin Ke committed
127
128
129
130
      // fix the direction error when only have 2 bins
      if (meta_->missing_type == MissingType::NaN) {
        output->default_left = false;
      }
Guolin Ke's avatar
Guolin Ke committed
131
    }
Guolin Ke's avatar
Guolin Ke committed
132
    output->gain -= min_gain_shift;
Guolin Ke's avatar
Guolin Ke committed
133
    output->monotone_type = meta_->monotone_type;
Guolin Ke's avatar
Guolin Ke committed
134
  }
135

136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
  /*! \brief Dispatch to the categorical finder, choosing randomized (extra_trees) or exhaustive search. */
  void FindBestThresholdCategorical(double sum_gradient, double sum_hessian,
                                    data_size_t num_data,
                                    const ConstraintEntry& constraints,
                                    SplitInfo* output) {
    const bool use_random_threshold = meta_->config->extra_trees;
    if (use_random_threshold) {
      FindBestThresholdCategoricalInner<true>(sum_gradient, sum_hessian, num_data, constraints, output);
    } else {
      FindBestThresholdCategoricalInner<false>(sum_gradient, sum_hessian, num_data, constraints, output);
    }
  }

  /*!
  * \brief Search categorical splits: one-hot (single category vs rest) when the
  * feature has few bins, otherwise a sorted many-vs-many scan ordered by the
  * smoothed gradient/hessian ratio. Fills `output` when a split beats the
  * no-split baseline. IS_RAND restricts the search to one pre-drawn threshold
  * (extra_trees mode).
  */
  template<bool IS_RAND>
  void FindBestThresholdCategoricalInner(double sum_gradient, double sum_hessian, data_size_t num_data,
    const ConstraintEntry& constraints, SplitInfo* output) {
    output->default_left = false;
    double best_gain = kMinScore;
    data_size_t best_left_count = 0;
    double best_sum_left_gradient = 0;
    double best_sum_left_hessian = 0;
    // Baseline gain of keeping the leaf unsplit.
    double gain_shift = GetLeafSplitGain(sum_gradient, sum_hessian,
      meta_->config->lambda_l1, meta_->config->lambda_l2, meta_->config->max_delta_step);

    double min_gain_shift = gain_shift + meta_->config->min_gain_to_split;
    bool is_full_categorical = meta_->missing_type == MissingType::None;
    int used_bin = meta_->num_bin - 1 + is_full_categorical;

    std::vector<int> sorted_idx;
    double l2 = meta_->config->lambda_l2;
    bool use_onehot = meta_->num_bin <= meta_->config->max_cat_to_onehot;
    int best_threshold = -1;
    int best_dir = 1;
    // Converts a hessian sum back into an (approximate) data count.
    const double cnt_factor = num_data / sum_hessian;
    int rand_threshold = 0;
    if (use_onehot) {
      // NOTE(review): is_splittable_ is not reset to false on this one-hot path
      // (only the many-vs-many path below resets it) — confirm the caller resets
      // it before dispatching here.
      if (IS_RAND) {
        if (used_bin > 0) {
          rand_threshold = rand_.NextInt(0, used_bin);
        }
      }
      // Try each single category as the left child.
      for (int t = 0; t < used_bin; ++t) {
        const auto grad = GET_GRAD(data_, t);
        const auto hess = GET_HESS(data_, t);
        data_size_t cnt = static_cast<data_size_t>(Common::RoundInt(hess * cnt_factor));
        // if data not enough, or sum hessian too small
        if (cnt < meta_->config->min_data_in_leaf
            || hess < meta_->config->min_sum_hessian_in_leaf) continue;
        data_size_t other_count = num_data - cnt;
        // if data not enough
        if (other_count < meta_->config->min_data_in_leaf) continue;

        double sum_other_hessian = sum_hessian - hess - kEpsilon;
        // if sum hessian too small
        if (sum_other_hessian < meta_->config->min_sum_hessian_in_leaf) continue;

        double sum_other_gradient = sum_gradient - grad;
        if (IS_RAND) {
          // extra_trees: evaluate only the pre-drawn candidate.
          if (t != rand_threshold) {
            continue;
          }
        }
        // current split gain
        double current_gain = GetSplitGains(sum_other_gradient, sum_other_hessian, grad, hess + kEpsilon,
          meta_->config->lambda_l1, l2, meta_->config->max_delta_step, constraints, 0);
        // gain with split is worse than without split
        if (current_gain <= min_gain_shift) continue;

        // mark to is splittable
        is_splittable_ = true;
        // better split point
        if (current_gain > best_gain) {
          best_threshold = t;
          best_sum_left_gradient = grad;
          best_sum_left_hessian = hess + kEpsilon;
          best_left_count = cnt;
          best_gain = current_gain;
        }
      }
    } else {
      // Many-vs-many: keep only categories with enough (smoothed) data.
      for (int i = 0; i < used_bin; ++i) {
        if (Common::RoundInt(GET_HESS(data_, i) * cnt_factor) >= meta_->config->cat_smooth) {
          sorted_idx.push_back(i);
        }
      }
      used_bin = static_cast<int>(sorted_idx.size());

      l2 += meta_->config->cat_l2;

      // Order categories by smoothed gradient/hessian ratio (Fisher-style ordering).
      auto ctr_fun = [this](double sum_grad, double sum_hess) {
        return (sum_grad) / (sum_hess + meta_->config->cat_smooth);
      };
      std::sort(sorted_idx.begin(), sorted_idx.end(),
        [this, &ctr_fun](int i, int j) {
          return ctr_fun(GET_GRAD(data_, i), GET_HESS(data_, i)) < ctr_fun(GET_GRAD(data_, j), GET_HESS(data_, j));
        });

      // Scan the sorted categories from both ends.
      std::vector<int> find_direction(1, 1);
      std::vector<int> start_position(1, 0);
      find_direction.push_back(-1);
      start_position.push_back(used_bin - 1);
      const int max_num_cat = std::min(meta_->config->max_cat_threshold, (used_bin + 1) / 2);
      int max_threshold = std::max(std::min(max_num_cat, used_bin) - 1, 0);
      if (IS_RAND) {
        if (max_threshold > 0) {
          rand_threshold = rand_.NextInt(0, max_threshold);
        }
      }

      is_splittable_ = false;
      for (size_t out_i = 0; out_i < find_direction.size(); ++out_i) {
        auto dir = find_direction[out_i];
        auto start_pos = start_position[out_i];
        data_size_t min_data_per_group = meta_->config->min_data_per_group;
        data_size_t cnt_cur_group = 0;
        double sum_left_gradient = 0.0f;
        double sum_left_hessian = kEpsilon;
        data_size_t left_count = 0;
        // Grow the left group one category at a time.
        for (int i = 0; i < used_bin && i < max_num_cat; ++i) {
          auto t = sorted_idx[start_pos];
          start_pos += dir;
          const auto grad = GET_GRAD(data_, t);
          const auto hess = GET_HESS(data_, t);
          data_size_t cnt = static_cast<data_size_t>(Common::RoundInt(hess * cnt_factor));

          sum_left_gradient += grad;
          sum_left_hessian += hess;
          left_count += cnt;
          cnt_cur_group += cnt;

          if (left_count < meta_->config->min_data_in_leaf
              || sum_left_hessian < meta_->config->min_sum_hessian_in_leaf) continue;
          data_size_t right_count = num_data - left_count;
          // The right side only shrinks from here on: stop the scan.
          if (right_count < meta_->config->min_data_in_leaf || right_count < min_data_per_group) break;

          double sum_right_hessian = sum_hessian - sum_left_hessian;
          if (sum_right_hessian < meta_->config->min_sum_hessian_in_leaf) break;

          // Only evaluate once the current group has accumulated enough data.
          if (cnt_cur_group < min_data_per_group) continue;

          cnt_cur_group = 0;

          double sum_right_gradient = sum_gradient - sum_left_gradient;
          if (IS_RAND) {
            if (i != rand_threshold) {
              continue;
            }
          }
          double current_gain = GetSplitGains(sum_left_gradient, sum_left_hessian, sum_right_gradient, sum_right_hessian,
            meta_->config->lambda_l1, l2, meta_->config->max_delta_step, constraints, 0);
          if (current_gain <= min_gain_shift) continue;
          is_splittable_ = true;
          if (current_gain > best_gain) {
            best_left_count = left_count;
            best_sum_left_gradient = sum_left_gradient;
            best_sum_left_hessian = sum_left_hessian;
            // best_threshold is a position in the sorted order, not a bin id.
            best_threshold = i;
            best_gain = current_gain;
            best_dir = dir;
          }
        }
      }
    }

    if (is_splittable_) {
      output->left_output = CalculateSplittedLeafOutput(best_sum_left_gradient, best_sum_left_hessian,
        meta_->config->lambda_l1, l2, meta_->config->max_delta_step, constraints);
      output->left_count = best_left_count;
      output->left_sum_gradient = best_sum_left_gradient;
      output->left_sum_hessian = best_sum_left_hessian - kEpsilon;
      output->right_output = CalculateSplittedLeafOutput(
        sum_gradient - best_sum_left_gradient, sum_hessian - best_sum_left_hessian,
        meta_->config->lambda_l1, l2, meta_->config->max_delta_step, constraints);
      output->right_count = num_data - best_left_count;
      output->right_sum_gradient = sum_gradient - best_sum_left_gradient;
      output->right_sum_hessian = sum_hessian - best_sum_left_hessian - kEpsilon;
      output->gain = best_gain - min_gain_shift;
      if (use_onehot) {
        output->num_cat_threshold = 1;
        output->cat_threshold = std::vector<uint32_t>(1, static_cast<uint32_t>(best_threshold));
      } else {
        // Translate sorted positions back into the bin ids of the left group.
        output->num_cat_threshold = best_threshold + 1;
        output->cat_threshold = std::vector<uint32_t>(output->num_cat_threshold);
        if (best_dir == 1) {
          for (int i = 0; i < output->num_cat_threshold; ++i) {
            auto t = sorted_idx[i];
            output->cat_threshold[i] = t;
          }
        } else {
          for (int i = 0; i < output->num_cat_threshold; ++i) {
            auto t = sorted_idx[used_bin - 1 - i];
            output->cat_threshold[i] = t;
          }
        }
      }
      // Monotone constraints do not apply to categorical splits.
      output->monotone_type = 0;
    }
  }

335
  void GatherInfoForThreshold(double sum_gradient, double sum_hessian,
336
    uint32_t threshold, data_size_t num_data, SplitInfo* output) {
337
    if (meta_->bin_type == BinType::NumericalBin) {
338
      GatherInfoForThresholdNumerical(sum_gradient, sum_hessian, threshold, num_data, output);
339
    } else {
340
      GatherInfoForThresholdCategorical(sum_gradient, sum_hessian, threshold, num_data, output);
341
342
343
344
    }
  }

  /*!
  * \brief Compute SplitInfo for a forced numerical threshold: accumulate the
  * right-side sums from the histogram, derive the left side by subtraction,
  * and reject the split if its gain is no better than not splitting.
  */
  void GatherInfoForThresholdNumerical(double sum_gradient, double sum_hessian,
    uint32_t threshold, data_size_t num_data, SplitInfo* output) {
    // Baseline gain of the unsplit leaf.
    double gain_shift = GetLeafSplitGain(sum_gradient, sum_hessian,
      meta_->config->lambda_l1, meta_->config->lambda_l2, meta_->config->max_delta_step);
    double min_gain_shift = gain_shift + meta_->config->min_gain_to_split;

    const int8_t offset = meta_->offset;

    double sum_right_gradient = 0.0f;
    double sum_right_hessian = kEpsilon;
    data_size_t right_count = 0;

    // Mirror the missing-value handling used by the best-threshold search.
    bool use_na_as_missing = false;
    bool skip_default_bin = false;
    if (meta_->missing_type == MissingType::Zero) {
      skip_default_bin = true;
    } else if (meta_->missing_type == MissingType::NaN) {
      use_na_as_missing = true;
    }

    int t = meta_->num_bin - 1 - offset - use_na_as_missing;
    const int t_end = 1 - offset;
    // Converts hessian sums back into approximate data counts.
    const double cnt_factor = num_data / sum_hessian;
    // from right to left, and we don't need data in bin0
    for (; t >= t_end; --t) {
      // Stop once the scan crosses the forced threshold.
      if (static_cast<uint32_t>(t + offset) < threshold) { break; }

      // need to skip default bin
      if (skip_default_bin && (t + offset) == static_cast<int>(meta_->default_bin)) { continue; }
      const auto grad = GET_GRAD(data_, t);
      const auto hess = GET_HESS(data_, t);
      data_size_t cnt = static_cast<data_size_t>(Common::RoundInt(hess * cnt_factor));
      sum_right_gradient += grad;
      sum_right_hessian += hess;
      right_count += cnt;
    }
    // The left side is whatever remains of the totals.
    double sum_left_gradient = sum_gradient - sum_right_gradient;
    double sum_left_hessian = sum_hessian - sum_right_hessian;
    data_size_t left_count = num_data - right_count;
    double current_gain = GetLeafSplitGain(sum_left_gradient, sum_left_hessian,
      meta_->config->lambda_l1, meta_->config->lambda_l2, meta_->config->max_delta_step)
      + GetLeafSplitGain(sum_right_gradient, sum_right_hessian,
          meta_->config->lambda_l1, meta_->config->lambda_l2, meta_->config->max_delta_step);

    // gain with split is worse than without split
    if (std::isnan(current_gain) || current_gain <= min_gain_shift) {
      output->gain = kMinScore;
      Log::Warning("'Forced Split' will be ignored since the gain getting worse. ");
      return;
    }

    // update split information
    output->threshold = threshold;
    output->left_output = CalculateSplittedLeafOutput(sum_left_gradient, sum_left_hessian,
      meta_->config->lambda_l1, meta_->config->lambda_l2, meta_->config->max_delta_step);
    output->left_count = left_count;
    output->left_sum_gradient = sum_left_gradient;
    output->left_sum_hessian = sum_left_hessian - kEpsilon;
    output->right_output = CalculateSplittedLeafOutput(
      sum_gradient - sum_left_gradient, sum_hessian - sum_left_hessian,
      meta_->config->lambda_l1, meta_->config->lambda_l2, meta_->config->max_delta_step);
    output->right_count = num_data - left_count;
    output->right_sum_gradient = sum_gradient - sum_left_gradient;
    output->right_sum_hessian = sum_hessian - sum_left_hessian - kEpsilon;
    output->gain = current_gain;
    output->gain -= min_gain_shift;
    output->default_left = true;
  }

  /*!
  * \brief Compute SplitInfo for a forced one-hot categorical split: the single
  * bin `threshold` goes left, everything else goes right. Rejects the split
  * if the threshold is out of range or the gain is not better than no split.
  */
  void GatherInfoForThresholdCategorical(double sum_gradient, double sum_hessian,
    uint32_t threshold, data_size_t num_data, SplitInfo* output) {
    // get SplitInfo for a given one-hot categorical split.
    output->default_left = false;
    // Baseline gain of the unsplit leaf.
    double gain_shift = GetLeafSplitGain(sum_gradient, sum_hessian,
      meta_->config->lambda_l1, meta_->config->lambda_l2, meta_->config->max_delta_step);
    double min_gain_shift = gain_shift + meta_->config->min_gain_to_split;
    bool is_full_categorical = meta_->missing_type == MissingType::None;
    int used_bin = meta_->num_bin - 1 + is_full_categorical;
    if (threshold >= static_cast<uint32_t>(used_bin)) {
      output->gain = kMinScore;
      Log::Warning("Invalid categorical threshold split");
      return;
    }
    // Converts the hessian sum back into an approximate data count.
    const double cnt_factor = num_data / sum_hessian;
    const auto grad = GET_GRAD(data_, threshold);
    const auto hess = GET_HESS(data_, threshold);
    data_size_t cnt = static_cast<data_size_t>(Common::RoundInt(hess * cnt_factor));

    double l2 = meta_->config->lambda_l2;
    data_size_t left_count = cnt;
    data_size_t right_count = num_data - left_count;
    double sum_left_hessian = hess + kEpsilon;
    double sum_right_hessian = sum_hessian - sum_left_hessian;
    double sum_left_gradient = grad;
    double sum_right_gradient = sum_gradient - sum_left_gradient;
    // current split gain
    double current_gain = GetLeafSplitGain(sum_right_gradient, sum_right_hessian,
      meta_->config->lambda_l1, l2, meta_->config->max_delta_step)
      + GetLeafSplitGain(sum_left_gradient, sum_left_hessian,
          meta_->config->lambda_l1, l2, meta_->config->max_delta_step);
    if (std::isnan(current_gain) || current_gain <= min_gain_shift) {
      output->gain = kMinScore;
      Log::Warning("'Forced Split' will be ignored since the gain getting worse.");
      return;
    }

    output->left_output = CalculateSplittedLeafOutput(sum_left_gradient, sum_left_hessian,
      meta_->config->lambda_l1, l2, meta_->config->max_delta_step);
    output->left_count = left_count;
    output->left_sum_gradient = sum_left_gradient;
    output->left_sum_hessian = sum_left_hessian - kEpsilon;
    output->right_output = CalculateSplittedLeafOutput(sum_right_gradient, sum_right_hessian,
      meta_->config->lambda_l1, l2, meta_->config->max_delta_step);
    output->right_count = right_count;
    output->right_sum_gradient = sum_gradient - sum_left_gradient;
    output->right_sum_hessian = sum_right_hessian - kEpsilon;
    output->gain = current_gain - min_gain_shift;
    output->num_cat_threshold = 1;
    output->cat_threshold = std::vector<uint32_t>(1, threshold);
  }


Guolin Ke's avatar
Guolin Ke committed
468
469
470
471
  /*!
  * \brief Binary size of this histogram
  */
  int SizeOfHistgram() const {
472
    return (meta_->num_bin - meta_->offset) * kHistEntrySize;
Guolin Ke's avatar
Guolin Ke committed
473
474
475
476
477
  }

  /*!
  * \brief Restore histogram from memory
  */
Guolin Ke's avatar
Guolin Ke committed
478
  void FromMemory(char* memory_data) {
479
    std::memcpy(data_, memory_data, (meta_->num_bin - meta_->offset) * kHistEntrySize);
Guolin Ke's avatar
Guolin Ke committed
480
481
482
483
484
485
486
487
488
489
490
491
  }

  /*!
  * \brief True if the last threshold search found at least one valid split
  */
  bool is_splittable() { return is_splittable_; }

  /*!
  * \brief Force the splittable flag for this histogram
  */
  void set_is_splittable(bool val) { is_splittable_ = val; }

492
493
494
495
496
497
498
499
500
501
502
503
  /*! \brief L1 soft-thresholding: shrink |s| by l1 (floored at 0), keeping the sign of s. */
  static double ThresholdL1(double s, double l1) {
    const double magnitude = std::max(0.0, std::fabs(s) - l1);
    return magnitude * Common::Sign(s);
  }

  /*!
  * \brief Newton-step leaf output with L1/L2 regularization, optionally
  * capped in magnitude by max_delta_step (cap disabled when <= 0).
  */
  static double CalculateSplittedLeafOutput(double sum_gradients, double sum_hessians, double l1, double l2, double max_delta_step) {
    const double raw_output = -ThresholdL1(sum_gradients, l1) / (sum_hessians + l2);
    // Keep the exact original condition (including its NaN behavior).
    if (max_delta_step <= 0.0f || std::fabs(raw_output) <= max_delta_step) {
      return raw_output;
    }
    return Common::Sign(raw_output) * max_delta_step;
  }

506
 private:
Guolin Ke's avatar
Guolin Ke committed
507
  static double GetSplitGains(double sum_left_gradients, double sum_left_hessians,
508
509
    double sum_right_gradients, double sum_right_hessians,
    double l1, double l2, double max_delta_step,
510
511
512
    const ConstraintEntry& constraints, int8_t monotone_constraint) {
    double left_output = CalculateSplittedLeafOutput(sum_left_gradients, sum_left_hessians, l1, l2, max_delta_step, constraints);
    double right_output = CalculateSplittedLeafOutput(sum_right_gradients, sum_right_hessians, l1, l2, max_delta_step, constraints);
Guolin Ke's avatar
Guolin Ke committed
513
514
515
516
517
518
519
520
    if (((monotone_constraint > 0) && (left_output > right_output)) ||
      ((monotone_constraint < 0) && (left_output < right_output))) {
      return 0;
    }
    return GetLeafSplitGainGivenOutput(sum_left_gradients, sum_left_hessians, l1, l2, left_output)
      + GetLeafSplitGainGivenOutput(sum_right_gradients, sum_right_hessians, l1, l2, right_output);
  }

Guolin Ke's avatar
Guolin Ke committed
521
  /*!
  * \brief Calculate the output of a leaf based on regularized sum_gradients and sum_hessians,
  * then clip it into the [constraints.min, constraints.max] range.
  * \param sum_gradients
  * \param sum_hessians
  * \return leaf output, clipped to the constraint range
  */
  static double CalculateSplittedLeafOutput(double sum_gradients, double sum_hessians,
                                            double l1, double l2, double max_delta_step,
                                            const ConstraintEntry& constraints) {
    double ret = CalculateSplittedLeafOutput(sum_gradients, sum_hessians, l1, l2, max_delta_step);
    if (ret < constraints.min) {
      ret = constraints.min;
    } else if (ret > constraints.max) {
      ret = constraints.max;
    }
    return ret;
  }
Guolin Ke's avatar
Guolin Ke committed
538

Guolin Ke's avatar
Guolin Ke committed
539
540
541
542
543
544
  /*!
  * \brief Calculate the split gain based on regularized sum_gradients and sum_hessians
  * \param sum_gradients
  * \param sum_hessians
  * \return split gain
  */
545
546
547
  static double GetLeafSplitGain(double sum_gradients, double sum_hessians, double l1, double l2, double max_delta_step) {
    double output = CalculateSplittedLeafOutput(sum_gradients, sum_hessians, l1, l2, max_delta_step);
    return GetLeafSplitGainGivenOutput(sum_gradients, sum_hessians, l1, l2, output);
Guolin Ke's avatar
Guolin Ke committed
548
549
550
551
552
553
  }

  /*! \brief Gain -(2*g_l1*w + (h + l2)*w^2) for a fixed leaf output w. */
  static double GetLeafSplitGainGivenOutput(double sum_gradients, double sum_hessians, double l1, double l2, double output) {
    const double regularized_gradient = ThresholdL1(sum_gradients, l1);
    const double linear_term = 2.0 * regularized_gradient * output;
    const double quadratic_term = (sum_hessians + l2) * output * output;
    return -(linear_term + quadratic_term);
  }
Guolin Ke's avatar
Guolin Ke committed
554

555
  template<bool IS_RAND>
556
  void FindBestThresholdSequence(double sum_gradient, double sum_hessian, data_size_t num_data, const ConstraintEntry& constraints,
557
                                 double min_gain_shift, SplitInfo* output, int dir, bool skip_default_bin, bool use_na_as_missing, int rand_threshold) {
558
    const int8_t offset = meta_->offset;
Guolin Ke's avatar
Guolin Ke committed
559
560
561
562
563
564

    double best_sum_left_gradient = NAN;
    double best_sum_left_hessian = NAN;
    double best_gain = kMinScore;
    data_size_t best_left_count = 0;
    uint32_t best_threshold = static_cast<uint32_t>(meta_->num_bin);
565
    const double cnt_factor = num_data / sum_hessian;
Guolin Ke's avatar
Guolin Ke committed
566
567
568
569
570
    if (dir == -1) {
      double sum_right_gradient = 0.0f;
      double sum_right_hessian = kEpsilon;
      data_size_t right_count = 0;

571
572
      int t = meta_->num_bin - 1 - offset - use_na_as_missing;
      const int t_end = 1 - offset;
Guolin Ke's avatar
Guolin Ke committed
573
574
575
576

      // from right to left, and we don't need data in bin0
      for (; t >= t_end; --t) {
        // need to skip default bin
577
        if (skip_default_bin && (t + offset) == static_cast<int>(meta_->default_bin)) { continue; }
Guolin Ke's avatar
Guolin Ke committed
578

579
580
581
582
583
584
        const auto grad = GET_GRAD(data_, t);
        const auto hess = GET_HESS(data_, t);
        data_size_t cnt = static_cast<data_size_t>(Common::RoundInt(hess * cnt_factor));
        sum_right_gradient += grad;
        sum_right_hessian += hess;
        right_count += cnt;
Guolin Ke's avatar
Guolin Ke committed
585
        // if data not enough, or sum hessian too small
Guolin Ke's avatar
Guolin Ke committed
586
        if (right_count < meta_->config->min_data_in_leaf
587
            || sum_right_hessian < meta_->config->min_sum_hessian_in_leaf) continue;
Guolin Ke's avatar
Guolin Ke committed
588
589
        data_size_t left_count = num_data - right_count;
        // if data not enough
Guolin Ke's avatar
Guolin Ke committed
590
        if (left_count < meta_->config->min_data_in_leaf) break;
Guolin Ke's avatar
Guolin Ke committed
591
592
593

        double sum_left_hessian = sum_hessian - sum_right_hessian;
        // if sum hessian too small
Guolin Ke's avatar
Guolin Ke committed
594
        if (sum_left_hessian < meta_->config->min_sum_hessian_in_leaf) break;
Guolin Ke's avatar
Guolin Ke committed
595
596

        double sum_left_gradient = sum_gradient - sum_right_gradient;
597
        if (IS_RAND) {
Guolin Ke's avatar
Guolin Ke committed
598
599
          if (t + offset != rand_threshold) {
            continue;
600
          }
Guolin Ke's avatar
Guolin Ke committed
601
        }
Guolin Ke's avatar
Guolin Ke committed
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
        // current split gain
        double current_gain = GetSplitGains(sum_left_gradient, sum_left_hessian, sum_right_gradient, sum_right_hessian,
                                            meta_->config->lambda_l1, meta_->config->lambda_l2, meta_->config->max_delta_step,
                                            constraints, meta_->monotone_type);
        // gain with split is worse than without split
        if (current_gain <= min_gain_shift) continue;

        // mark to is splittable
        is_splittable_ = true;
        // better split point
        if (current_gain > best_gain) {
          best_left_count = left_count;
          best_sum_left_gradient = sum_left_gradient;
          best_sum_left_hessian = sum_left_hessian;
          // left is <= threshold, right is > threshold.  so this is t-1
          best_threshold = static_cast<uint32_t>(t - 1 + offset);
          best_gain = current_gain;
        }
Guolin Ke's avatar
Guolin Ke committed
620
      }
ChenZhiyong's avatar
ChenZhiyong committed
621
    } else {
Guolin Ke's avatar
Guolin Ke committed
622
623
624
625
626
      double sum_left_gradient = 0.0f;
      double sum_left_hessian = kEpsilon;
      data_size_t left_count = 0;

      int t = 0;
627
      const int t_end = meta_->num_bin - 2 - offset;
Guolin Ke's avatar
Guolin Ke committed
628

629
      if (use_na_as_missing && offset == 1) {
Guolin Ke's avatar
Guolin Ke committed
630
631
632
        sum_left_gradient = sum_gradient;
        sum_left_hessian = sum_hessian - kEpsilon;
        left_count = num_data;
633
        for (int i = 0; i < meta_->num_bin - offset; ++i) {
634
635
636
637
638
639
          const auto grad = GET_GRAD(data_, i);
          const auto hess = GET_HESS(data_, i);
          data_size_t cnt = static_cast<data_size_t>(Common::RoundInt(hess * cnt_factor));
          sum_left_gradient -= grad;
          sum_left_hessian -= hess;
          left_count -= cnt;
Guolin Ke's avatar
Guolin Ke committed
640
641
642
643
        }
        t = -1;
      }

Guolin Ke's avatar
Guolin Ke committed
644
645
      for (; t <= t_end; ++t) {
        // need to skip default bin
646
        if (skip_default_bin && (t + offset) == static_cast<int>(meta_->default_bin)) { continue; }
Guolin Ke's avatar
Guolin Ke committed
647
        if (t >= 0) {
648
649
650
          sum_left_gradient += GET_GRAD(data_, t);
          sum_left_hessian += GET_HESS(data_, t);
          left_count += static_cast<data_size_t>(Common::RoundInt(GET_HESS(data_, t) * cnt_factor));
Guolin Ke's avatar
Guolin Ke committed
651
        }
Guolin Ke's avatar
Guolin Ke committed
652
        // if data not enough, or sum hessian too small
Guolin Ke's avatar
Guolin Ke committed
653
        if (left_count < meta_->config->min_data_in_leaf
654
            || sum_left_hessian < meta_->config->min_sum_hessian_in_leaf) continue;
Guolin Ke's avatar
Guolin Ke committed
655
656
        data_size_t right_count = num_data - left_count;
        // if data not enough
Guolin Ke's avatar
Guolin Ke committed
657
        if (right_count < meta_->config->min_data_in_leaf) break;
Guolin Ke's avatar
Guolin Ke committed
658
659
660

        double sum_right_hessian = sum_hessian - sum_left_hessian;
        // if sum hessian too small
Guolin Ke's avatar
Guolin Ke committed
661
        if (sum_right_hessian < meta_->config->min_sum_hessian_in_leaf) break;
Guolin Ke's avatar
Guolin Ke committed
662
663

        double sum_right_gradient = sum_gradient - sum_left_gradient;
664
        if (IS_RAND) {
Guolin Ke's avatar
Guolin Ke committed
665
666
          if (t + offset != rand_threshold) {
            continue;
667
          }
Guolin Ke's avatar
Guolin Ke committed
668
        }
Guolin Ke's avatar
Guolin Ke committed
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
        // current split gain
        double current_gain = GetSplitGains(sum_left_gradient, sum_left_hessian, sum_right_gradient, sum_right_hessian,
                                            meta_->config->lambda_l1, meta_->config->lambda_l2, meta_->config->max_delta_step,
                                            constraints, meta_->monotone_type);
        // gain with split is worse than without split
        if (current_gain <= min_gain_shift) continue;

        // mark to is splittable
        is_splittable_ = true;
        // better split point
        if (current_gain > best_gain) {
          best_left_count = left_count;
          best_sum_left_gradient = sum_left_gradient;
          best_sum_left_hessian = sum_left_hessian;
          best_threshold = static_cast<uint32_t>(t + offset);
          best_gain = current_gain;
        }
Guolin Ke's avatar
Guolin Ke committed
686
687
688
689
690
691
      }
    }

    if (is_splittable_ && best_gain > output->gain) {
      // update split information
      output->threshold = best_threshold;
692
693
694
695
      output->left_output = CalculateSplittedLeafOutput(
          best_sum_left_gradient, best_sum_left_hessian,
          meta_->config->lambda_l1, meta_->config->lambda_l2,
          meta_->config->max_delta_step, constraints);
Guolin Ke's avatar
Guolin Ke committed
696
697
698
      output->left_count = best_left_count;
      output->left_sum_gradient = best_sum_left_gradient;
      output->left_sum_hessian = best_sum_left_hessian - kEpsilon;
699
700
      output->right_output = CalculateSplittedLeafOutput(
        sum_gradient - best_sum_left_gradient, sum_hessian - best_sum_left_hessian,
701
        meta_->config->lambda_l1, meta_->config->lambda_l2, meta_->config->max_delta_step,
702
        constraints);
Guolin Ke's avatar
Guolin Ke committed
703
704
705
706
      output->right_count = num_data - best_left_count;
      output->right_sum_gradient = sum_gradient - best_sum_left_gradient;
      output->right_sum_hessian = sum_hessian - best_sum_left_hessian - kEpsilon;
      output->gain = best_gain;
Guolin Ke's avatar
Guolin Ke committed
707
      output->default_left = dir == -1;
Guolin Ke's avatar
Guolin Ke committed
708
709
710
    }
  }

Guolin Ke's avatar
Guolin Ke committed
711
  const FeatureMetainfo* meta_;
Guolin Ke's avatar
Guolin Ke committed
712
  /*! \brief sum of gradient of each bin */
713
  hist_t* data_;
Guolin Ke's avatar
Guolin Ke committed
714
  bool is_splittable_ = true;
715
716
  /*! \brief random number generator for extremely randomized trees */
  Random rand_;
717

Nikita Titov's avatar
Nikita Titov committed
718
719
  std::function<void(double, double, data_size_t, const ConstraintEntry&, SplitInfo*)>
    find_best_threshold_fun_;
Guolin Ke's avatar
Guolin Ke committed
720
};
Nikita Titov's avatar
Nikita Titov committed
721

Guolin Ke's avatar
Guolin Ke committed
722
class HistogramPool {
723
 public:
Guolin Ke's avatar
Guolin Ke committed
724
725
726
727
  /*!
  * \brief Constructor
  */
  HistogramPool() {
Guolin Ke's avatar
Guolin Ke committed
728
729
    cache_size_ = 0;
    total_size_ = 0;
Guolin Ke's avatar
Guolin Ke committed
730
  }
731

Guolin Ke's avatar
Guolin Ke committed
732
733
734
735
736
  /*!
  * \brief Destructor
  */
  ~HistogramPool() {
  }
737

Guolin Ke's avatar
Guolin Ke committed
738
739
740
741
742
  /*!
  * \brief Reset pool size
  * \param cache_size Max cache size
  * \param total_size Total size will be used
  */
Guolin Ke's avatar
Guolin Ke committed
743
  void Reset(int cache_size, int total_size) {
Guolin Ke's avatar
Guolin Ke committed
744
745
746
747
748
749
750
751
752
    cache_size_ = cache_size;
    // at least need 2 bucket to store smaller leaf and larger leaf
    CHECK(cache_size_ >= 2);
    total_size_ = total_size;
    if (cache_size_ > total_size_) {
      cache_size_ = total_size_;
    }
    is_enough_ = (cache_size_ == total_size_);
    if (!is_enough_) {
753
754
755
      mapper_.resize(total_size_);
      inverse_mapper_.resize(cache_size_);
      last_used_time_.resize(cache_size_);
Guolin Ke's avatar
Guolin Ke committed
756
757
758
      ResetMap();
    }
  }
759

Guolin Ke's avatar
Guolin Ke committed
760
761
762
763
764
765
766
767
768
769
770
771
  /*!
  * \brief Reset mapper
  */
  void ResetMap() {
    if (!is_enough_) {
      cur_time_ = 0;
      std::fill(mapper_.begin(), mapper_.end(), -1);
      std::fill(inverse_mapper_.begin(), inverse_mapper_.end(), -1);
      std::fill(last_used_time_.begin(), last_used_time_.end(), 0);
    }
  }

772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
  static void SetFeatureInfo(const Dataset* train_data, const Config* config, std::vector<FeatureMetainfo>* feature_meta) {
    auto& ref_feature_meta = *feature_meta;
    const int num_feature = train_data->num_features();
    ref_feature_meta.resize(num_feature);
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < num_feature; ++i) {
      ref_feature_meta[i].num_bin = train_data->FeatureNumBin(i);
      ref_feature_meta[i].default_bin = train_data->FeatureBinMapper(i)->GetDefaultBin();
      ref_feature_meta[i].missing_type = train_data->FeatureBinMapper(i)->missing_type();
      const int real_fidx = train_data->RealFeatureIndex(i);
      if (!config->monotone_constraints.empty()) {
        ref_feature_meta[i].monotone_type = config->monotone_constraints[real_fidx];
      } else {
        ref_feature_meta[i].monotone_type = 0;
      }
      if (!config->feature_contri.empty()) {
        ref_feature_meta[i].penalty = config->feature_contri[real_fidx];
      } else {
        ref_feature_meta[i].penalty = 1.0;
      }
      if (train_data->FeatureBinMapper(i)->GetMostFreqBin() == 0) {
        ref_feature_meta[i].offset = 1;
      } else {
        ref_feature_meta[i].offset = 0;
      }
      ref_feature_meta[i].config = config;
      ref_feature_meta[i].bin_type = train_data->FeatureBinMapper(i)->bin_type();
    }
  }

  static void SetFeatureInfoConfig(const Dataset* train_data, const Config* config, std::vector<FeatureMetainfo>* feature_meta) {
    auto& ref_feature_meta = *feature_meta;
    const int num_feature = train_data->num_features();
    ref_feature_meta.resize(num_feature);
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < num_feature; ++i) {
      const int real_fidx = train_data->RealFeatureIndex(i);
      if (!config->monotone_constraints.empty()) {
        ref_feature_meta[i].monotone_type = config->monotone_constraints[real_fidx];
      } else {
        ref_feature_meta[i].monotone_type = 0;
      }
      if (!config->feature_contri.empty()) {
        ref_feature_meta[i].penalty = config->feature_contri[real_fidx];
      } else {
        ref_feature_meta[i].penalty = 1.0;
      }
      ref_feature_meta[i].config = config;
    }
  }
822
  void DynamicChangeSize(const Dataset* train_data, bool is_hist_colwise, const Config* config, int cache_size, int total_size) {
Guolin Ke's avatar
Guolin Ke committed
823
    if (feature_metas_.empty()) {
824
      SetFeatureInfo(train_data, config, &feature_metas_);
825
      uint64_t bin_cnt_over_features = 0;
826
      for (int i = 0; i < train_data->num_features(); ++i) {
827
        bin_cnt_over_features += static_cast<uint64_t>(feature_metas_[i].num_bin);
Guolin Ke's avatar
Guolin Ke committed
828
      }
829
      Log::Info("Total Bins %d", bin_cnt_over_features);
Guolin Ke's avatar
Guolin Ke committed
830
    }
Guolin Ke's avatar
Guolin Ke committed
831
    int old_cache_size = static_cast<int>(pool_.size());
Guolin Ke's avatar
Guolin Ke committed
832
    Reset(cache_size, total_size);
Guolin Ke's avatar
Guolin Ke committed
833
834
835
836
837

    if (cache_size > old_cache_size) {
      pool_.resize(cache_size);
      data_.resize(cache_size);
    }
838
    int num_total_bin = static_cast<int>(train_data->NumTotalBin());
Guolin Ke's avatar
Guolin Ke committed
839

840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
    std::vector<int> offsets;
    if (is_hist_colwise) {
      int offset = 0;
      for (int j = 0; j < train_data->num_features(); ++j) {
        offset += train_data->SubFeatureBinOffset(j);
        offsets.push_back(offset);
        auto num_bin = train_data->FeatureNumBin(j);
        if (train_data->FeatureBinMapper(j)->GetMostFreqBin() == 0) {
          num_bin -= 1;
        }
        offset += num_bin;
      }
    } else {
      num_total_bin = 1;
      for (int j = 0; j < train_data->num_features(); ++j) {
        offsets.push_back(num_total_bin);
        num_total_bin += train_data->FeatureBinMapper(j)->num_bin();
        if (train_data->FeatureBinMapper(j)->GetMostFreqBin() == 0) {
          num_total_bin -= 1;
        }
      }
    }
862
    OMP_INIT_EX();
Guolin Ke's avatar
Guolin Ke committed
863
    #pragma omp parallel for schedule(static)
Guolin Ke's avatar
Guolin Ke committed
864
    for (int i = old_cache_size; i < cache_size; ++i) {
865
      OMP_LOOP_EX_BEGIN();
Guolin Ke's avatar
Guolin Ke committed
866
      pool_[i].reset(new FeatureHistogram[train_data->num_features()]);
867
      data_[i].resize(num_total_bin * 2);
Guolin Ke's avatar
Guolin Ke committed
868
      for (int j = 0; j < train_data->num_features(); ++j) {
869
        pool_[i][j].Init(data_[i].data() + offsets[j] * 2, &feature_metas_[j]);
Guolin Ke's avatar
Guolin Ke committed
870
      }
871
      OMP_LOOP_EX_END();
Guolin Ke's avatar
Guolin Ke committed
872
    }
873
    OMP_THROW_EX();
Guolin Ke's avatar
Guolin Ke committed
874
875
  }

876
877
  void ResetConfig(const Dataset* train_data, const Config* config) {
    SetFeatureInfoConfig(train_data, config, &feature_metas_);
Guolin Ke's avatar
Guolin Ke committed
878
  }
879

Guolin Ke's avatar
Guolin Ke committed
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
  /*!
  * \brief Get data for the specific index
  * \param idx which index want to get
  * \param out output data will store into this
  * \return True if this index is in the pool, False if this index is not in the pool
  */
  bool Get(int idx, FeatureHistogram** out) {
    if (is_enough_) {
      *out = pool_[idx].get();
      return true;
    } else if (mapper_[idx] >= 0) {
      int slot = mapper_[idx];
      *out = pool_[slot].get();
      last_used_time_[slot] = ++cur_time_;
      return true;
    } else {
896
      // choose the least used slot
Guolin Ke's avatar
Guolin Ke committed
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
      int slot = static_cast<int>(ArrayArgs<int>::ArgMin(last_used_time_));
      *out = pool_[slot].get();
      last_used_time_[slot] = ++cur_time_;

      // reset previous mapper
      if (inverse_mapper_[slot] >= 0) mapper_[inverse_mapper_[slot]] = -1;

      // update current mapper
      mapper_[idx] = slot;
      inverse_mapper_[slot] = idx;
      return false;
    }
  }

  /*!
  * \brief Move data from one index to another index
  * \param src_idx
  * \param dst_idx
  */
  void Move(int src_idx, int dst_idx) {
    if (is_enough_) {
      std::swap(pool_[src_idx], pool_[dst_idx]);
      return;
    }
    if (mapper_[src_idx] < 0) {
      return;
    }
    // get slot of src idx
    int slot = mapper_[src_idx];
    // reset src_idx
    mapper_[src_idx] = -1;

    // move to dst idx
    mapper_[dst_idx] = slot;
    last_used_time_[slot] = ++cur_time_;
    inverse_mapper_[slot] = dst_idx;
  }
934

935
 private:
Guolin Ke's avatar
Guolin Ke committed
936
  std::vector<std::unique_ptr<FeatureHistogram[]>> pool_;
937
  std::vector<std::vector<hist_t, Common::AlignmentAllocator<hist_t, kAlignedSize>>> data_;
Guolin Ke's avatar
Guolin Ke committed
938
  std::vector<FeatureMetainfo> feature_metas_;
Guolin Ke's avatar
Guolin Ke committed
939
940
941
942
943
944
945
946
947
  int cache_size_;
  int total_size_;
  bool is_enough_ = false;
  std::vector<int> mapper_;
  std::vector<int> inverse_mapper_;
  std::vector<int> last_used_time_;
  int cur_time_ = 0;
};

Guolin Ke's avatar
Guolin Ke committed
948
}  // namespace LightGBM
Guolin Ke's avatar
Guolin Ke committed
949
#endif   // LIGHTGBM_TREELEARNER_FEATURE_HISTOGRAM_HPP_