"vscode:/vscode.git/clone" did not exist on "9cc3777c8a892c7ca253864839a926fd35f2b775"
feature_histogram.hpp 36.2 KB
Newer Older
1
2
3
4
/*!
 * Copyright (c) 2016 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for license information.
 */
Guolin Ke's avatar
Guolin Ke committed
5
6
7
#ifndef LIGHTGBM_TREELEARNER_FEATURE_HISTOGRAM_HPP_
#define LIGHTGBM_TREELEARNER_FEATURE_HISTOGRAM_HPP_

#include <LightGBM/bin.h>
#include <LightGBM/dataset.h>
#include <LightGBM/utils/array_args.h>

#include <algorithm>
#include <cmath>
#include <cstring>
#include <functional>
#include <memory>
#include <utility>
#include <vector>

#include "monotone_constraints.hpp"
#include "split_info.hpp"
Guolin Ke's avatar
Guolin Ke committed
21

22
namespace LightGBM {
Guolin Ke's avatar
Guolin Ke committed
23

Guolin Ke's avatar
Guolin Ke committed
24
class FeatureMetainfo {
25
 public:
Guolin Ke's avatar
Guolin Ke committed
26
  int num_bin;
Guolin Ke's avatar
Guolin Ke committed
27
  MissingType missing_type;
28
  int8_t offset = 0;
Guolin Ke's avatar
Guolin Ke committed
29
  uint32_t default_bin;
30
31
  int8_t monotone_type = 0;
  double penalty = 1.0;
Guolin Ke's avatar
Guolin Ke committed
32
  /*! \brief pointer of tree config */
Guolin Ke's avatar
Guolin Ke committed
33
  const Config* config;
34
  BinType bin_type;
Guolin Ke's avatar
Guolin Ke committed
35
};
Guolin Ke's avatar
Guolin Ke committed
36
37
38
39
/*!
* \brief FeatureHistogram is used to construct and store a histogram for a feature.
*/
class FeatureHistogram {
40
 public:
Guolin Ke's avatar
Guolin Ke committed
41
  FeatureHistogram() {
Guolin Ke's avatar
Guolin Ke committed
42
    data_ = nullptr;
Guolin Ke's avatar
Guolin Ke committed
43
  }
Guolin Ke's avatar
Guolin Ke committed
44

Guolin Ke's avatar
Guolin Ke committed
45
46
47
  /*! \brief Trivial destructor: data_ is externally owned, nothing to release. */
  ~FeatureHistogram() {
  }

  /*! \brief Disable copy */
  FeatureHistogram& operator=(const FeatureHistogram&) = delete;
  /*! \brief Disable copy */
  FeatureHistogram(const FeatureHistogram&) = delete;

Guolin Ke's avatar
Guolin Ke committed
53
54
55
56
57
  /*!
  * \brief Init the feature histogram
  * \param feature the feature data for this histogram
  * \param min_num_data_one_leaf minimal number of data in one leaf
  */
58
  void Init(hist_t* data, const FeatureMetainfo* meta) {
Guolin Ke's avatar
Guolin Ke committed
59
60
    meta_ = meta;
    data_ = data;
61
    if (meta_->bin_type == BinType::NumericalBin) {
Nikita Titov's avatar
Nikita Titov committed
62
63
      find_best_threshold_fun_ = std::bind(&FeatureHistogram::FindBestThresholdNumerical, this, std::placeholders::_1,
        std::placeholders::_2, std::placeholders::_3, std::placeholders::_4, std::placeholders::_5);
64
    } else {
Nikita Titov's avatar
Nikita Titov committed
65
66
      find_best_threshold_fun_ = std::bind(&FeatureHistogram::FindBestThresholdCategorical, this, std::placeholders::_1,
        std::placeholders::_2, std::placeholders::_3, std::placeholders::_4, std::placeholders::_5);
67
    }
68
    rand_ = Random(meta_->config->extra_seed);
Guolin Ke's avatar
Guolin Ke committed
69
70
  }

71
  /*! \brief Get the raw histogram memory (gradient/hessian pairs per bin). */
  hist_t* RawData() {
    return data_;
  }
  /*!
  * \brief Subtract current histograms with other
  * \param other The histogram that want to subtract
  */
  void Subtract(const FeatureHistogram& other) {
79
80
    for (int i = 0; i < (meta_->num_bin - meta_->offset) * 2; ++i) {
      data_[i] -= other.data_[i];
Guolin Ke's avatar
Guolin Ke committed
81
82
    }
  }
83

84
  void FindBestThreshold(double sum_gradient, double sum_hessian, data_size_t num_data,
85
    const ConstraintEntry& constraints, SplitInfo* output) {
Guolin Ke's avatar
Guolin Ke committed
86
    output->default_left = true;
Guolin Ke's avatar
Guolin Ke committed
87
    output->gain = kMinScore;
88
    find_best_threshold_fun_(sum_gradient, sum_hessian + 2 * kEpsilon, num_data, constraints, output);
Guolin Ke's avatar
Guolin Ke committed
89
    output->gain *= meta_->penalty;
90
91
  }

92
  void FindBestThresholdNumerical(double sum_gradient, double sum_hessian, data_size_t num_data,
93
    const ConstraintEntry& constraints, SplitInfo* output) {
Guolin Ke's avatar
Guolin Ke committed
94
    is_splittable_ = false;
Guolin Ke's avatar
Guolin Ke committed
95
    double gain_shift = GetLeafSplitGain(sum_gradient, sum_hessian,
96
      meta_->config->lambda_l1, meta_->config->lambda_l2, meta_->config->max_delta_step);
Guolin Ke's avatar
Guolin Ke committed
97
    double min_gain_shift = gain_shift + meta_->config->min_gain_to_split;
98
    int rand_threshold = 0;
99
    if (meta_->num_bin - 2 > 0) {
100
101
102
      rand_threshold = rand_.NextInt(0, meta_->num_bin - 2);
    }
    bool is_rand = meta_->config->extra_trees;
Guolin Ke's avatar
Guolin Ke committed
103
104
    if (meta_->num_bin > 2 && meta_->missing_type != MissingType::None) {
      if (meta_->missing_type == MissingType::Zero) {
105
        if (is_rand) {
106
107
          FindBestThresholdSequence<true>(sum_gradient, sum_hessian, num_data, constraints, min_gain_shift, output, -1, true, false, rand_threshold);
          FindBestThresholdSequence<true>(sum_gradient, sum_hessian, num_data, constraints, min_gain_shift, output, 1, true, false, rand_threshold);
108
        } else {
109
110
          FindBestThresholdSequence<false>(sum_gradient, sum_hessian, num_data, constraints, min_gain_shift, output, -1, true, false, rand_threshold);
          FindBestThresholdSequence<false>(sum_gradient, sum_hessian, num_data, constraints, min_gain_shift, output, 1, true, false, rand_threshold);
111
        }
Guolin Ke's avatar
Guolin Ke committed
112
      } else {
113
        if (is_rand) {
114
115
          FindBestThresholdSequence<true>(sum_gradient, sum_hessian, num_data, constraints, min_gain_shift, output, -1, false, true, rand_threshold);
          FindBestThresholdSequence<true>(sum_gradient, sum_hessian, num_data, constraints, min_gain_shift, output, 1, false, true, rand_threshold);
116
        } else {
117
118
          FindBestThresholdSequence<false>(sum_gradient, sum_hessian, num_data, constraints, min_gain_shift, output, -1, false, true, rand_threshold);
          FindBestThresholdSequence<false>(sum_gradient, sum_hessian, num_data, constraints, min_gain_shift, output, 1, false, true, rand_threshold);
119
        }
Guolin Ke's avatar
Guolin Ke committed
120
      }
121
    } else {
122
      if (is_rand) {
123
        FindBestThresholdSequence<true>(sum_gradient, sum_hessian, num_data, constraints, min_gain_shift, output, -1, false, false, rand_threshold);
124
      } else {
125
        FindBestThresholdSequence<false>(sum_gradient, sum_hessian, num_data, constraints, min_gain_shift, output, -1, false, false, rand_threshold);
126
      }
Guolin Ke's avatar
Guolin Ke committed
127
128
129
130
      // fix the direction error when only have 2 bins
      if (meta_->missing_type == MissingType::NaN) {
        output->default_left = false;
      }
Guolin Ke's avatar
Guolin Ke committed
131
    }
Guolin Ke's avatar
Guolin Ke committed
132
    output->gain -= min_gain_shift;
Guolin Ke's avatar
Guolin Ke committed
133
    output->monotone_type = meta_->monotone_type;
Guolin Ke's avatar
Guolin Ke committed
134
  }
135

136
  void FindBestThresholdCategorical(double sum_gradient, double sum_hessian, data_size_t num_data,
137
    const ConstraintEntry& constraints, SplitInfo* output) {
Guolin Ke's avatar
Guolin Ke committed
138
    output->default_left = false;
139
    double best_gain = kMinScore;
140
    data_size_t best_left_count = 0;
ChenZhiyong's avatar
ChenZhiyong committed
141
142
    double best_sum_left_gradient = 0;
    double best_sum_left_hessian = 0;
143
144
    double gain_shift = GetLeafSplitGain(sum_gradient, sum_hessian,
      meta_->config->lambda_l1, meta_->config->lambda_l2, meta_->config->max_delta_step);
145

Guolin Ke's avatar
Guolin Ke committed
146
    double min_gain_shift = gain_shift + meta_->config->min_gain_to_split;
ChenZhiyong's avatar
ChenZhiyong committed
147
    bool is_full_categorical = meta_->missing_type == MissingType::None;
148
    int used_bin = meta_->num_bin - 1 + is_full_categorical;
ChenZhiyong's avatar
ChenZhiyong committed
149

Guolin Ke's avatar
Guolin Ke committed
150
    std::vector<int> sorted_idx;
Guolin Ke's avatar
Guolin Ke committed
151
152
    double l2 = meta_->config->lambda_l2;
    bool use_onehot = meta_->num_bin <= meta_->config->max_cat_to_onehot;
153
154
    int best_threshold = -1;
    int best_dir = 1;
155
    const double cnt_factor = num_data / sum_hessian;
156
157
    if (use_onehot) {
      for (int t = 0; t < used_bin; ++t) {
158
159
160
        const auto grad = GET_GRAD(data_, t);
        const auto hess = GET_HESS(data_, t);
        data_size_t cnt = static_cast<data_size_t>(Common::RoundInt(hess * cnt_factor));
161
        // if data not enough, or sum hessian too small
162
        if (cnt < meta_->config->min_data_in_leaf
163
            || hess < meta_->config->min_sum_hessian_in_leaf) continue;
164
        data_size_t other_count = num_data - cnt;
165
        // if data not enough
Guolin Ke's avatar
Guolin Ke committed
166
        if (other_count < meta_->config->min_data_in_leaf) continue;
ChenZhiyong's avatar
ChenZhiyong committed
167

168
        double sum_other_hessian = sum_hessian - hess - kEpsilon;
169
        // if sum hessian too small
Guolin Ke's avatar
Guolin Ke committed
170
        if (sum_other_hessian < meta_->config->min_sum_hessian_in_leaf) continue;
ChenZhiyong's avatar
ChenZhiyong committed
171

172
        double sum_other_gradient = sum_gradient - grad;
173
        // current split gain
174
        double current_gain = GetSplitGains(sum_other_gradient, sum_other_hessian, grad, hess + kEpsilon,
175
          meta_->config->lambda_l1, l2, meta_->config->max_delta_step, constraints, 0);
176
        // gain with split is worse than without split
ChenZhiyong's avatar
ChenZhiyong committed
177
        if (current_gain <= min_gain_shift) continue;
178
179

        // mark to is splittable
ChenZhiyong's avatar
ChenZhiyong committed
180
        is_splittable_ = true;
181
        // better split point
ChenZhiyong's avatar
ChenZhiyong committed
182
        if (current_gain > best_gain) {
183
          best_threshold = t;
184
185
186
          best_sum_left_gradient = grad;
          best_sum_left_hessian = hess + kEpsilon;
          best_left_count = cnt;
ChenZhiyong's avatar
ChenZhiyong committed
187
          best_gain = current_gain;
188
189
190
191
        }
      }
    } else {
      for (int i = 0; i < used_bin; ++i) {
192
        if (Common::RoundInt(GET_HESS(data_, i) * cnt_factor) >= meta_->config->cat_smooth) {
193
194
195
196
197
          sorted_idx.push_back(i);
        }
      }
      used_bin = static_cast<int>(sorted_idx.size());

Guolin Ke's avatar
Guolin Ke committed
198
      l2 += meta_->config->cat_l2;
199
200

      auto ctr_fun = [this](double sum_grad, double sum_hess) {
Guolin Ke's avatar
Guolin Ke committed
201
        return (sum_grad) / (sum_hess + meta_->config->cat_smooth);
202
203
      };
      std::sort(sorted_idx.begin(), sorted_idx.end(),
204
205
206
        [this, &ctr_fun](int i, int j) {
          return ctr_fun(GET_GRAD(data_, i), GET_HESS(data_, i)) < ctr_fun(GET_GRAD(data_, j), GET_HESS(data_, j));
        });
207
208
209
210
211

      std::vector<int> find_direction(1, 1);
      std::vector<int> start_position(1, 0);
      find_direction.push_back(-1);
      start_position.push_back(used_bin - 1);
Guolin Ke's avatar
Guolin Ke committed
212
      const int max_num_cat = std::min(meta_->config->max_cat_threshold, (used_bin + 1) / 2);
213
214
215
216
217
      int max_threshold = std::max(std::min(max_num_cat, used_bin) - 1, 0);
      int rand_threshold = 0;
      if (max_threshold > 0) {
        rand_threshold = rand_.NextInt(0, max_threshold);
      }
218

219
220
221
222
      is_splittable_ = false;
      for (size_t out_i = 0; out_i < find_direction.size(); ++out_i) {
        auto dir = find_direction[out_i];
        auto start_pos = start_position[out_i];
Guolin Ke's avatar
Guolin Ke committed
223
        data_size_t min_data_per_group = meta_->config->min_data_per_group;
224
225
226
227
228
229
230
        data_size_t cnt_cur_group = 0;
        double sum_left_gradient = 0.0f;
        double sum_left_hessian = kEpsilon;
        data_size_t left_count = 0;
        for (int i = 0; i < used_bin && i < max_num_cat; ++i) {
          auto t = sorted_idx[start_pos];
          start_pos += dir;
231
232
233
          const auto grad = GET_GRAD(data_, t);
          const auto hess = GET_HESS(data_, t);
          data_size_t cnt = static_cast<data_size_t>(Common::RoundInt(hess * cnt_factor));
234

235
236
237
238
          sum_left_gradient += grad;
          sum_left_hessian += hess;
          left_count += cnt;
          cnt_cur_group += cnt;
239

Guolin Ke's avatar
Guolin Ke committed
240
          if (left_count < meta_->config->min_data_in_leaf
241
              || sum_left_hessian < meta_->config->min_sum_hessian_in_leaf) continue;
242
          data_size_t right_count = num_data - left_count;
Guolin Ke's avatar
Guolin Ke committed
243
          if (right_count < meta_->config->min_data_in_leaf || right_count < min_data_per_group) break;
244
245

          double sum_right_hessian = sum_hessian - sum_left_hessian;
Guolin Ke's avatar
Guolin Ke committed
246
          if (sum_right_hessian < meta_->config->min_sum_hessian_in_leaf) break;
247
248
249
250
251
252

          if (cnt_cur_group < min_data_per_group) continue;

          cnt_cur_group = 0;

          double sum_right_gradient = sum_gradient - sum_left_gradient;
253
254
          if (!meta_->config->extra_trees || i == rand_threshold) {
            double current_gain = GetSplitGains(sum_left_gradient, sum_left_hessian, sum_right_gradient, sum_right_hessian,
255
              meta_->config->lambda_l1, l2, meta_->config->max_delta_step, constraints, 0);
256
257
258
259
260
261
262
263
264
265
            if (current_gain <= min_gain_shift) continue;
            is_splittable_ = true;
            if (current_gain > best_gain) {
              best_left_count = left_count;
              best_sum_left_gradient = sum_left_gradient;
              best_sum_left_hessian = sum_left_hessian;
              best_threshold = i;
              best_gain = current_gain;
              best_dir = dir;
            }
266
          }
ChenZhiyong's avatar
ChenZhiyong committed
267
        }
268
269
      }
    }
270

271
    if (is_splittable_) {
Guolin Ke's avatar
Guolin Ke committed
272
      output->left_output = CalculateSplittedLeafOutput(best_sum_left_gradient, best_sum_left_hessian,
273
        meta_->config->lambda_l1, l2, meta_->config->max_delta_step, constraints);
274
275
276
      output->left_count = best_left_count;
      output->left_sum_gradient = best_sum_left_gradient;
      output->left_sum_hessian = best_sum_left_hessian - kEpsilon;
277
278
      output->right_output = CalculateSplittedLeafOutput(
        sum_gradient - best_sum_left_gradient, sum_hessian - best_sum_left_hessian,
279
        meta_->config->lambda_l1, l2, meta_->config->max_delta_step, constraints);
280
281
282
      output->right_count = num_data - best_left_count;
      output->right_sum_gradient = sum_gradient - best_sum_left_gradient;
      output->right_sum_hessian = sum_hessian - best_sum_left_hessian - kEpsilon;
Guolin Ke's avatar
Guolin Ke committed
283
      output->gain = best_gain - min_gain_shift;
284
285
286
      if (use_onehot) {
        output->num_cat_threshold = 1;
        output->cat_threshold = std::vector<uint32_t>(1, static_cast<uint32_t>(best_threshold));
ChenZhiyong's avatar
ChenZhiyong committed
287
      } else {
288
289
290
291
292
293
294
295
296
297
298
299
        output->num_cat_threshold = best_threshold + 1;
        output->cat_threshold = std::vector<uint32_t>(output->num_cat_threshold);
        if (best_dir == 1) {
          for (int i = 0; i < output->num_cat_threshold; ++i) {
            auto t = sorted_idx[i];
            output->cat_threshold[i] = t;
          }
        } else {
          for (int i = 0; i < output->num_cat_threshold; ++i) {
            auto t = sorted_idx[used_bin - 1 - i];
            output->cat_threshold[i] = t;
          }
ChenZhiyong's avatar
ChenZhiyong committed
300
301
        }
      }
Guolin Ke's avatar
Guolin Ke committed
302
      output->monotone_type = 0;
303
    }
304
305
  }

306
  void GatherInfoForThreshold(double sum_gradient, double sum_hessian,
307
    uint32_t threshold, data_size_t num_data, SplitInfo* output) {
308
    if (meta_->bin_type == BinType::NumericalBin) {
309
      GatherInfoForThresholdNumerical(sum_gradient, sum_hessian, threshold, num_data, output);
310
    } else {
311
      GatherInfoForThresholdCategorical(sum_gradient, sum_hessian, threshold, num_data, output);
312
313
314
315
    }
  }

  void GatherInfoForThresholdNumerical(double sum_gradient, double sum_hessian,
316
    uint32_t threshold, data_size_t num_data, SplitInfo* output) {
317
    double gain_shift = GetLeafSplitGain(sum_gradient, sum_hessian,
318
      meta_->config->lambda_l1, meta_->config->lambda_l2, meta_->config->max_delta_step);
Guolin Ke's avatar
Guolin Ke committed
319
    double min_gain_shift = gain_shift + meta_->config->min_gain_to_split;
320
321

    // do stuff here
322
    const int8_t offset = meta_->offset;
323
324
325
326
327
328

    double sum_right_gradient = 0.0f;
    double sum_right_hessian = kEpsilon;
    data_size_t right_count = 0;

    // set values
329
330
    bool use_na_as_missing = false;
    bool skip_default_bin = false;
331
332
    if (meta_->missing_type == MissingType::Zero) {
      skip_default_bin = true;
333
    } else if (meta_->missing_type == MissingType::NaN) {
334
335
336
      use_na_as_missing = true;
    }

337
338
    int t = meta_->num_bin - 1 - offset - use_na_as_missing;
    const int t_end = 1 - offset;
339
    const double cnt_factor = num_data / sum_hessian;
340
341
    // from right to left, and we don't need data in bin0
    for (; t >= t_end; --t) {
342
      if (static_cast<uint32_t>(t + offset) < threshold) { break; }
343
344

      // need to skip default bin
345
      if (skip_default_bin && (t + offset) == static_cast<int>(meta_->default_bin)) { continue; }
346
347
348
349
350
351
      const auto grad = GET_GRAD(data_, t);
      const auto hess = GET_HESS(data_, t);
      data_size_t cnt = static_cast<data_size_t>(Common::RoundInt(hess * cnt_factor));
      sum_right_gradient += grad;
      sum_right_hessian += hess;
      right_count += cnt;
352
353
354
355
356
    }
    double sum_left_gradient = sum_gradient - sum_right_gradient;
    double sum_left_hessian = sum_hessian - sum_right_hessian;
    data_size_t left_count = num_data - right_count;
    double current_gain = GetLeafSplitGain(sum_left_gradient, sum_left_hessian,
357
      meta_->config->lambda_l1, meta_->config->lambda_l2, meta_->config->max_delta_step)
358
      + GetLeafSplitGain(sum_right_gradient, sum_right_hessian,
359
          meta_->config->lambda_l1, meta_->config->lambda_l2, meta_->config->max_delta_step);
360
361
362
363

    // gain with split is worse than without split
    if (std::isnan(current_gain) || current_gain <= min_gain_shift) {
      output->gain = kMinScore;
364
      Log::Warning("'Forced Split' will be ignored since the gain getting worse. ");
365
      return;
366
    }
367
368
369
370

    // update split information
    output->threshold = threshold;
    output->left_output = CalculateSplittedLeafOutput(sum_left_gradient, sum_left_hessian,
371
      meta_->config->lambda_l1, meta_->config->lambda_l2, meta_->config->max_delta_step);
372
373
374
    output->left_count = left_count;
    output->left_sum_gradient = sum_left_gradient;
    output->left_sum_hessian = sum_left_hessian - kEpsilon;
375
376
377
    output->right_output = CalculateSplittedLeafOutput(
      sum_gradient - sum_left_gradient, sum_hessian - sum_left_hessian,
      meta_->config->lambda_l1, meta_->config->lambda_l2, meta_->config->max_delta_step);
378
379
380
381
382
383
384
385
386
    output->right_count = num_data - left_count;
    output->right_sum_gradient = sum_gradient - sum_left_gradient;
    output->right_sum_hessian = sum_hessian - sum_left_hessian - kEpsilon;
    output->gain = current_gain;
    output->gain -= min_gain_shift;
    output->default_left = true;
  }

  void GatherInfoForThresholdCategorical(double sum_gradient, double sum_hessian,
387
    uint32_t threshold, data_size_t num_data, SplitInfo* output) {
388
389
    // get SplitInfo for a given one-hot categorical split.
    output->default_left = false;
390
391
    double gain_shift = GetLeafSplitGain(sum_gradient, sum_hessian,
      meta_->config->lambda_l1, meta_->config->lambda_l2, meta_->config->max_delta_step);
Guolin Ke's avatar
Guolin Ke committed
392
    double min_gain_shift = gain_shift + meta_->config->min_gain_to_split;
393
394
395
396
397
398
399
    bool is_full_categorical = meta_->missing_type == MissingType::None;
    int used_bin = meta_->num_bin - 1 + is_full_categorical;
    if (threshold >= static_cast<uint32_t>(used_bin)) {
      output->gain = kMinScore;
      Log::Warning("Invalid categorical threshold split");
      return;
    }
400
401
402
403
    const double cnt_factor = num_data / sum_hessian;
    const auto grad = GET_GRAD(data_, threshold);
    const auto hess = GET_HESS(data_, threshold);
    data_size_t cnt = static_cast<data_size_t>(Common::RoundInt(hess * cnt_factor));
404

Guolin Ke's avatar
Guolin Ke committed
405
    double l2 = meta_->config->lambda_l2;
406
    data_size_t left_count = cnt;
407
    data_size_t right_count = num_data - left_count;
408
    double sum_left_hessian = hess + kEpsilon;
409
    double sum_right_hessian = sum_hessian - sum_left_hessian;
410
    double sum_left_gradient = grad;
411
412
413
    double sum_right_gradient = sum_gradient - sum_left_gradient;
    // current split gain
    double current_gain = GetLeafSplitGain(sum_right_gradient, sum_right_hessian,
414
      meta_->config->lambda_l1, l2, meta_->config->max_delta_step)
415
      + GetLeafSplitGain(sum_left_gradient, sum_left_hessian,
416
          meta_->config->lambda_l1, l2, meta_->config->max_delta_step);
417
418
    if (std::isnan(current_gain) || current_gain <= min_gain_shift) {
      output->gain = kMinScore;
419
      Log::Warning("'Forced Split' will be ignored since the gain getting worse.");
420
421
422
423
      return;
    }

    output->left_output = CalculateSplittedLeafOutput(sum_left_gradient, sum_left_hessian,
424
      meta_->config->lambda_l1, l2, meta_->config->max_delta_step);
425
426
427
428
    output->left_count = left_count;
    output->left_sum_gradient = sum_left_gradient;
    output->left_sum_hessian = sum_left_hessian - kEpsilon;
    output->right_output = CalculateSplittedLeafOutput(sum_right_gradient, sum_right_hessian,
429
      meta_->config->lambda_l1, l2, meta_->config->max_delta_step);
430
431
432
433
434
435
436
437
438
    output->right_count = right_count;
    output->right_sum_gradient = sum_gradient - sum_left_gradient;
    output->right_sum_hessian = sum_right_hessian - kEpsilon;
    output->gain = current_gain - min_gain_shift;
    output->num_cat_threshold = 1;
    output->cat_threshold = std::vector<uint32_t>(1, threshold);
  }


Guolin Ke's avatar
Guolin Ke committed
439
440
441
442
  /*!
  * \brief Binary size of this histogram
  */
  int SizeOfHistgram() const {
443
    return (meta_->num_bin - meta_->offset) * kHistEntrySize;
Guolin Ke's avatar
Guolin Ke committed
444
445
446
447
448
  }

  /*!
  * \brief Restore histogram from memory
  */
Guolin Ke's avatar
Guolin Ke committed
449
  void FromMemory(char* memory_data) {
450
    std::memcpy(data_, memory_data, (meta_->num_bin - meta_->offset) * kHistEntrySize);
Guolin Ke's avatar
Guolin Ke committed
451
452
453
454
455
456
457
458
459
460
461
462
  }

  /*!
  * \brief True if this histogram can be splitted
  */
  bool is_splittable() { return is_splittable_; }

  /*!
  * \brief Set splittable to this histogram
  */
  void set_is_splittable(bool val) { is_splittable_ = val; }

463
464
465
466
467
468
469
470
471
472
473
474
  /*! \brief Apply L1 (soft) thresholding to s: shrink |s| by l1, keep the sign. */
  static double ThresholdL1(double s, double l1) {
    const double reg_s = std::max(0.0, std::fabs(s) - l1);
    return Common::Sign(s) * reg_s;
  }

  /*!
  * \brief Leaf output from regularized sums: -ThresholdL1(G, l1) / (H + l2),
  *        clipped to +/- max_delta_step when that limit is positive.
  */
  static double CalculateSplittedLeafOutput(double sum_gradients, double sum_hessians, double l1, double l2, double max_delta_step) {
    double ret = -ThresholdL1(sum_gradients, l1) / (sum_hessians + l2);
    if (max_delta_step <= 0.0f || std::fabs(ret) <= max_delta_step) {
      return ret;
    } else {
      return Common::Sign(ret) * max_delta_step;
    }
  }

477
 private:
Guolin Ke's avatar
Guolin Ke committed
478
  static double GetSplitGains(double sum_left_gradients, double sum_left_hessians,
479
480
    double sum_right_gradients, double sum_right_hessians,
    double l1, double l2, double max_delta_step,
481
482
483
    const ConstraintEntry& constraints, int8_t monotone_constraint) {
    double left_output = CalculateSplittedLeafOutput(sum_left_gradients, sum_left_hessians, l1, l2, max_delta_step, constraints);
    double right_output = CalculateSplittedLeafOutput(sum_right_gradients, sum_right_hessians, l1, l2, max_delta_step, constraints);
Guolin Ke's avatar
Guolin Ke committed
484
485
486
487
488
489
490
491
    if (((monotone_constraint > 0) && (left_output > right_output)) ||
      ((monotone_constraint < 0) && (left_output < right_output))) {
      return 0;
    }
    return GetLeafSplitGainGivenOutput(sum_left_gradients, sum_left_hessians, l1, l2, left_output)
      + GetLeafSplitGainGivenOutput(sum_right_gradients, sum_right_hessians, l1, l2, right_output);
  }

Guolin Ke's avatar
Guolin Ke committed
492
  /*!
493
  * \brief Calculate the output of a leaf based on regularized sum_gradients and sum_hessians
Guolin Ke's avatar
Guolin Ke committed
494
495
496
497
  * \param sum_gradients
  * \param sum_hessians
  * \return leaf output
  */
Nikita Titov's avatar
Nikita Titov committed
498
499
500
  static double CalculateSplittedLeafOutput(double sum_gradients, double sum_hessians,
                                            double l1, double l2, double max_delta_step,
                                            const ConstraintEntry& constraints) {
501
    double ret = CalculateSplittedLeafOutput(sum_gradients, sum_hessians, l1, l2, max_delta_step);
502
503
504
505
    if (ret < constraints.min) {
      ret = constraints.min;
    } else if (ret > constraints.max) {
      ret = constraints.max;
Guolin Ke's avatar
Guolin Ke committed
506
507
    }
    return ret;
Guolin Ke's avatar
Guolin Ke committed
508
  }
Guolin Ke's avatar
Guolin Ke committed
509

Guolin Ke's avatar
Guolin Ke committed
510
511
512
513
514
515
  /*!
  * \brief Calculate the split gain based on regularized sum_gradients and sum_hessians
  * \param sum_gradients
  * \param sum_hessians
  * \return split gain
  */
516
517
518
  static double GetLeafSplitGain(double sum_gradients, double sum_hessians, double l1, double l2, double max_delta_step) {
    double output = CalculateSplittedLeafOutput(sum_gradients, sum_hessians, l1, l2, max_delta_step);
    return GetLeafSplitGainGivenOutput(sum_gradients, sum_hessians, l1, l2, output);
Guolin Ke's avatar
Guolin Ke committed
519
520
521
522
523
524
  }

  /*! \brief Gain of a leaf with a fixed output: -(2*G_l1*w + (H+l2)*w^2). */
  static double GetLeafSplitGainGivenOutput(double sum_gradients, double sum_hessians, double l1, double l2, double output) {
    const double sg_l1 = ThresholdL1(sum_gradients, l1);
    return -(2.0 * sg_l1 * output + (sum_hessians + l2) * output * output);
  }
Guolin Ke's avatar
Guolin Ke committed
525

526
  template<bool is_rand>
527
  void FindBestThresholdSequence(double sum_gradient, double sum_hessian, data_size_t num_data, const ConstraintEntry& constraints,
528
                                 double min_gain_shift, SplitInfo* output, int dir, bool skip_default_bin, bool use_na_as_missing, int rand_threshold) {
529
    const int8_t offset = meta_->offset;
Guolin Ke's avatar
Guolin Ke committed
530
531
532
533
534
535

    double best_sum_left_gradient = NAN;
    double best_sum_left_hessian = NAN;
    double best_gain = kMinScore;
    data_size_t best_left_count = 0;
    uint32_t best_threshold = static_cast<uint32_t>(meta_->num_bin);
536
    const double cnt_factor = num_data / sum_hessian;
Guolin Ke's avatar
Guolin Ke committed
537
538
539
540
541
    if (dir == -1) {
      double sum_right_gradient = 0.0f;
      double sum_right_hessian = kEpsilon;
      data_size_t right_count = 0;

542
543
      int t = meta_->num_bin - 1 - offset - use_na_as_missing;
      const int t_end = 1 - offset;
Guolin Ke's avatar
Guolin Ke committed
544
545
546
547

      // from right to left, and we don't need data in bin0
      for (; t >= t_end; --t) {
        // need to skip default bin
548
        if (skip_default_bin && (t + offset) == static_cast<int>(meta_->default_bin)) { continue; }
Guolin Ke's avatar
Guolin Ke committed
549

550
551
552
553
554
555
        const auto grad = GET_GRAD(data_, t);
        const auto hess = GET_HESS(data_, t);
        data_size_t cnt = static_cast<data_size_t>(Common::RoundInt(hess * cnt_factor));
        sum_right_gradient += grad;
        sum_right_hessian += hess;
        right_count += cnt;
Guolin Ke's avatar
Guolin Ke committed
556
        // if data not enough, or sum hessian too small
Guolin Ke's avatar
Guolin Ke committed
557
        if (right_count < meta_->config->min_data_in_leaf
558
            || sum_right_hessian < meta_->config->min_sum_hessian_in_leaf) continue;
Guolin Ke's avatar
Guolin Ke committed
559
560
        data_size_t left_count = num_data - right_count;
        // if data not enough
Guolin Ke's avatar
Guolin Ke committed
561
        if (left_count < meta_->config->min_data_in_leaf) break;
Guolin Ke's avatar
Guolin Ke committed
562
563
564

        double sum_left_hessian = sum_hessian - sum_right_hessian;
        // if sum hessian too small
Guolin Ke's avatar
Guolin Ke committed
565
        if (sum_left_hessian < meta_->config->min_sum_hessian_in_leaf) break;
Guolin Ke's avatar
Guolin Ke committed
566
567

        double sum_left_gradient = sum_gradient - sum_right_gradient;
568
569
570
571
        if (!is_rand || t - 1 + offset == rand_threshold) {
          // current split gain
          double current_gain = GetSplitGains(sum_left_gradient, sum_left_hessian, sum_right_gradient, sum_right_hessian,
                                              meta_->config->lambda_l1, meta_->config->lambda_l2, meta_->config->max_delta_step,
572
                                              constraints, meta_->monotone_type);
573
574
          // gain with split is worse than without split
          if (current_gain <= min_gain_shift) continue;
Guolin Ke's avatar
Guolin Ke committed
575

576
577
578
579
580
581
582
583
584
585
586
          // mark to is splittable
          is_splittable_ = true;
          // better split point
          if (current_gain > best_gain) {
            best_left_count = left_count;
            best_sum_left_gradient = sum_left_gradient;
            best_sum_left_hessian = sum_left_hessian;
            // left is <= threshold, right is > threshold.  so this is t-1
            best_threshold = static_cast<uint32_t>(t - 1 + offset);
            best_gain = current_gain;
          }
Guolin Ke's avatar
Guolin Ke committed
587
588
        }
      }
ChenZhiyong's avatar
ChenZhiyong committed
589
    } else {
Guolin Ke's avatar
Guolin Ke committed
590
591
592
593
594
      double sum_left_gradient = 0.0f;
      double sum_left_hessian = kEpsilon;
      data_size_t left_count = 0;

      int t = 0;
595
      const int t_end = meta_->num_bin - 2 - offset;
Guolin Ke's avatar
Guolin Ke committed
596

597
      if (use_na_as_missing && offset == 1) {
Guolin Ke's avatar
Guolin Ke committed
598
599
600
        sum_left_gradient = sum_gradient;
        sum_left_hessian = sum_hessian - kEpsilon;
        left_count = num_data;
601
        for (int i = 0; i < meta_->num_bin - offset; ++i) {
602
603
604
605
606
607
          const auto grad = GET_GRAD(data_, i);
          const auto hess = GET_HESS(data_, i);
          data_size_t cnt = static_cast<data_size_t>(Common::RoundInt(hess * cnt_factor));
          sum_left_gradient -= grad;
          sum_left_hessian -= hess;
          left_count -= cnt;
Guolin Ke's avatar
Guolin Ke committed
608
609
610
611
        }
        t = -1;
      }

Guolin Ke's avatar
Guolin Ke committed
612
613
      for (; t <= t_end; ++t) {
        // need to skip default bin
614
        if (skip_default_bin && (t + offset) == static_cast<int>(meta_->default_bin)) { continue; }
Guolin Ke's avatar
Guolin Ke committed
615
        if (t >= 0) {
616
617
618
          sum_left_gradient += GET_GRAD(data_, t);
          sum_left_hessian += GET_HESS(data_, t);
          left_count += static_cast<data_size_t>(Common::RoundInt(GET_HESS(data_, t) * cnt_factor));
Guolin Ke's avatar
Guolin Ke committed
619
        }
Guolin Ke's avatar
Guolin Ke committed
620
        // if data not enough, or sum hessian too small
Guolin Ke's avatar
Guolin Ke committed
621
        if (left_count < meta_->config->min_data_in_leaf
622
            || sum_left_hessian < meta_->config->min_sum_hessian_in_leaf) continue;
Guolin Ke's avatar
Guolin Ke committed
623
624
        data_size_t right_count = num_data - left_count;
        // if data not enough
Guolin Ke's avatar
Guolin Ke committed
625
        if (right_count < meta_->config->min_data_in_leaf) break;
Guolin Ke's avatar
Guolin Ke committed
626
627
628

        double sum_right_hessian = sum_hessian - sum_left_hessian;
        // if sum hessian too small
Guolin Ke's avatar
Guolin Ke committed
629
        if (sum_right_hessian < meta_->config->min_sum_hessian_in_leaf) break;
Guolin Ke's avatar
Guolin Ke committed
630
631

        double sum_right_gradient = sum_gradient - sum_left_gradient;
632
633
634
635
        if (!is_rand || t + offset == rand_threshold) {
          // current split gain
          double current_gain = GetSplitGains(sum_left_gradient, sum_left_hessian, sum_right_gradient, sum_right_hessian,
                                              meta_->config->lambda_l1, meta_->config->lambda_l2, meta_->config->max_delta_step,
636
                                              constraints, meta_->monotone_type);
637
638
          // gain with split is worse than without split
          if (current_gain <= min_gain_shift) continue;
Guolin Ke's avatar
Guolin Ke committed
639

640
641
642
643
644
645
646
647
648
649
          // mark to is splittable
          is_splittable_ = true;
          // better split point
          if (current_gain > best_gain) {
            best_left_count = left_count;
            best_sum_left_gradient = sum_left_gradient;
            best_sum_left_hessian = sum_left_hessian;
            best_threshold = static_cast<uint32_t>(t + offset);
            best_gain = current_gain;
          }
Guolin Ke's avatar
Guolin Ke committed
650
651
652
653
654
655
656
        }
      }
    }

    if (is_splittable_ && best_gain > output->gain) {
      // update split information
      output->threshold = best_threshold;
657
658
659
660
      output->left_output = CalculateSplittedLeafOutput(
          best_sum_left_gradient, best_sum_left_hessian,
          meta_->config->lambda_l1, meta_->config->lambda_l2,
          meta_->config->max_delta_step, constraints);
Guolin Ke's avatar
Guolin Ke committed
661
662
663
      output->left_count = best_left_count;
      output->left_sum_gradient = best_sum_left_gradient;
      output->left_sum_hessian = best_sum_left_hessian - kEpsilon;
664
665
      output->right_output = CalculateSplittedLeafOutput(
        sum_gradient - best_sum_left_gradient, sum_hessian - best_sum_left_hessian,
666
        meta_->config->lambda_l1, meta_->config->lambda_l2, meta_->config->max_delta_step,
667
        constraints);
Guolin Ke's avatar
Guolin Ke committed
668
669
670
671
      output->right_count = num_data - best_left_count;
      output->right_sum_gradient = sum_gradient - best_sum_left_gradient;
      output->right_sum_hessian = sum_hessian - best_sum_left_hessian - kEpsilon;
      output->gain = best_gain;
Guolin Ke's avatar
Guolin Ke committed
672
      output->default_left = dir == -1;
Guolin Ke's avatar
Guolin Ke committed
673
674
675
    }
  }

Guolin Ke's avatar
Guolin Ke committed
676
  const FeatureMetainfo* meta_;
Guolin Ke's avatar
Guolin Ke committed
677
  /*! \brief sum of gradient of each bin */
678
  hist_t* data_;
Guolin Ke's avatar
Guolin Ke committed
679
  bool is_splittable_ = true;
680
681
  /*! \brief random number generator for extremely randomized trees */
  Random rand_;
682

Nikita Titov's avatar
Nikita Titov committed
683
684
  std::function<void(double, double, data_size_t, const ConstraintEntry&, SplitInfo*)>
    find_best_threshold_fun_;
Guolin Ke's avatar
Guolin Ke committed
685
};
Nikita Titov's avatar
Nikita Titov committed
686

Guolin Ke's avatar
Guolin Ke committed
687
class HistogramPool {
688
 public:
Guolin Ke's avatar
Guolin Ke committed
689
690
691
692
  /*!
  * \brief Constructor
  */
  HistogramPool() {
Guolin Ke's avatar
Guolin Ke committed
693
694
    cache_size_ = 0;
    total_size_ = 0;
Guolin Ke's avatar
Guolin Ke committed
695
  }
696

Guolin Ke's avatar
Guolin Ke committed
697
698
699
700
701
  /*!
  * \brief Destructor
  */
  ~HistogramPool() {
  }
702

Guolin Ke's avatar
Guolin Ke committed
703
704
705
706
707
  /*!
  * \brief Reset pool size
  * \param cache_size Max cache size
  * \param total_size Total size will be used
  */
Guolin Ke's avatar
Guolin Ke committed
708
  void Reset(int cache_size, int total_size) {
Guolin Ke's avatar
Guolin Ke committed
709
710
711
712
713
714
715
716
717
    cache_size_ = cache_size;
    // at least need 2 bucket to store smaller leaf and larger leaf
    CHECK(cache_size_ >= 2);
    total_size_ = total_size;
    if (cache_size_ > total_size_) {
      cache_size_ = total_size_;
    }
    is_enough_ = (cache_size_ == total_size_);
    if (!is_enough_) {
718
719
720
      mapper_.resize(total_size_);
      inverse_mapper_.resize(cache_size_);
      last_used_time_.resize(cache_size_);
Guolin Ke's avatar
Guolin Ke committed
721
722
723
      ResetMap();
    }
  }
724

Guolin Ke's avatar
Guolin Ke committed
725
726
727
728
729
730
731
732
733
734
735
736
  /*!
  * \brief Reset mapper
  */
  void ResetMap() {
    if (!is_enough_) {
      cur_time_ = 0;
      std::fill(mapper_.begin(), mapper_.end(), -1);
      std::fill(inverse_mapper_.begin(), inverse_mapper_.end(), -1);
      std::fill(last_used_time_.begin(), last_used_time_.end(), 0);
    }
  }

737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
  static void SetFeatureInfo(const Dataset* train_data, const Config* config, std::vector<FeatureMetainfo>* feature_meta) {
    auto& ref_feature_meta = *feature_meta;
    const int num_feature = train_data->num_features();
    ref_feature_meta.resize(num_feature);
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < num_feature; ++i) {
      ref_feature_meta[i].num_bin = train_data->FeatureNumBin(i);
      ref_feature_meta[i].default_bin = train_data->FeatureBinMapper(i)->GetDefaultBin();
      ref_feature_meta[i].missing_type = train_data->FeatureBinMapper(i)->missing_type();
      const int real_fidx = train_data->RealFeatureIndex(i);
      if (!config->monotone_constraints.empty()) {
        ref_feature_meta[i].monotone_type = config->monotone_constraints[real_fidx];
      } else {
        ref_feature_meta[i].monotone_type = 0;
      }
      if (!config->feature_contri.empty()) {
        ref_feature_meta[i].penalty = config->feature_contri[real_fidx];
      } else {
        ref_feature_meta[i].penalty = 1.0;
      }
      if (train_data->FeatureBinMapper(i)->GetMostFreqBin() == 0) {
        ref_feature_meta[i].offset = 1;
      } else {
        ref_feature_meta[i].offset = 0;
      }
      ref_feature_meta[i].config = config;
      ref_feature_meta[i].bin_type = train_data->FeatureBinMapper(i)->bin_type();
    }
  }

  static void SetFeatureInfoConfig(const Dataset* train_data, const Config* config, std::vector<FeatureMetainfo>* feature_meta) {
    auto& ref_feature_meta = *feature_meta;
    const int num_feature = train_data->num_features();
    ref_feature_meta.resize(num_feature);
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < num_feature; ++i) {
      const int real_fidx = train_data->RealFeatureIndex(i);
      if (!config->monotone_constraints.empty()) {
        ref_feature_meta[i].monotone_type = config->monotone_constraints[real_fidx];
      } else {
        ref_feature_meta[i].monotone_type = 0;
      }
      if (!config->feature_contri.empty()) {
        ref_feature_meta[i].penalty = config->feature_contri[real_fidx];
      } else {
        ref_feature_meta[i].penalty = 1.0;
      }
      ref_feature_meta[i].config = config;
    }
  }
787
  void DynamicChangeSize(const Dataset* train_data, bool is_hist_colwise, const Config* config, int cache_size, int total_size) {
Guolin Ke's avatar
Guolin Ke committed
788
    if (feature_metas_.empty()) {
789
      SetFeatureInfo(train_data, config, &feature_metas_);
790
      uint64_t bin_cnt_over_features = 0;
791
      for (int i = 0; i < train_data->num_features(); ++i) {
792
        bin_cnt_over_features += static_cast<uint64_t>(feature_metas_[i].num_bin);
Guolin Ke's avatar
Guolin Ke committed
793
      }
794
      Log::Info("Total Bins %d", bin_cnt_over_features);
Guolin Ke's avatar
Guolin Ke committed
795
    }
Guolin Ke's avatar
Guolin Ke committed
796
    int old_cache_size = static_cast<int>(pool_.size());
Guolin Ke's avatar
Guolin Ke committed
797
    Reset(cache_size, total_size);
Guolin Ke's avatar
Guolin Ke committed
798
799
800
801
802

    if (cache_size > old_cache_size) {
      pool_.resize(cache_size);
      data_.resize(cache_size);
    }
803
    int num_total_bin = static_cast<int>(train_data->NumTotalBin());
Guolin Ke's avatar
Guolin Ke committed
804

805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
    std::vector<int> offsets;
    if (is_hist_colwise) {
      int offset = 0;
      for (int j = 0; j < train_data->num_features(); ++j) {
        offset += train_data->SubFeatureBinOffset(j);
        offsets.push_back(offset);
        auto num_bin = train_data->FeatureNumBin(j);
        if (train_data->FeatureBinMapper(j)->GetMostFreqBin() == 0) {
          num_bin -= 1;
        }
        offset += num_bin;
      }
    } else {
      num_total_bin = 1;
      for (int j = 0; j < train_data->num_features(); ++j) {
        offsets.push_back(num_total_bin);
        num_total_bin += train_data->FeatureBinMapper(j)->num_bin();
        if (train_data->FeatureBinMapper(j)->GetMostFreqBin() == 0) {
          num_total_bin -= 1;
        }
      }
    }
827
    OMP_INIT_EX();
Guolin Ke's avatar
Guolin Ke committed
828
    #pragma omp parallel for schedule(static)
Guolin Ke's avatar
Guolin Ke committed
829
    for (int i = old_cache_size; i < cache_size; ++i) {
830
      OMP_LOOP_EX_BEGIN();
Guolin Ke's avatar
Guolin Ke committed
831
      pool_[i].reset(new FeatureHistogram[train_data->num_features()]);
832
      data_[i].resize(num_total_bin * 2);
Guolin Ke's avatar
Guolin Ke committed
833
      for (int j = 0; j < train_data->num_features(); ++j) {
834
        pool_[i][j].Init(data_[i].data() + offsets[j] * 2, &feature_metas_[j]);
Guolin Ke's avatar
Guolin Ke committed
835
      }
836
      OMP_LOOP_EX_END();
Guolin Ke's avatar
Guolin Ke committed
837
    }
838
    OMP_THROW_EX();
Guolin Ke's avatar
Guolin Ke committed
839
840
  }

841
842
  void ResetConfig(const Dataset* train_data, const Config* config) {
    SetFeatureInfoConfig(train_data, config, &feature_metas_);
Guolin Ke's avatar
Guolin Ke committed
843
  }
844

Guolin Ke's avatar
Guolin Ke committed
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
  /*!
  * \brief Get data for the specific index
  * \param idx which index want to get
  * \param out output data will store into this
  * \return True if this index is in the pool, False if this index is not in the pool
  */
  bool Get(int idx, FeatureHistogram** out) {
    if (is_enough_) {
      *out = pool_[idx].get();
      return true;
    } else if (mapper_[idx] >= 0) {
      int slot = mapper_[idx];
      *out = pool_[slot].get();
      last_used_time_[slot] = ++cur_time_;
      return true;
    } else {
861
      // choose the least used slot
Guolin Ke's avatar
Guolin Ke committed
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
      int slot = static_cast<int>(ArrayArgs<int>::ArgMin(last_used_time_));
      *out = pool_[slot].get();
      last_used_time_[slot] = ++cur_time_;

      // reset previous mapper
      if (inverse_mapper_[slot] >= 0) mapper_[inverse_mapper_[slot]] = -1;

      // update current mapper
      mapper_[idx] = slot;
      inverse_mapper_[slot] = idx;
      return false;
    }
  }

  /*!
  * \brief Move data from one index to another index
  * \param src_idx
  * \param dst_idx
  */
  void Move(int src_idx, int dst_idx) {
    if (is_enough_) {
      std::swap(pool_[src_idx], pool_[dst_idx]);
      return;
    }
    if (mapper_[src_idx] < 0) {
      return;
    }
    // get slot of src idx
    int slot = mapper_[src_idx];
    // reset src_idx
    mapper_[src_idx] = -1;

    // move to dst idx
    mapper_[dst_idx] = slot;
    last_used_time_[slot] = ++cur_time_;
    inverse_mapper_[slot] = dst_idx;
  }
899

900
 private:
Guolin Ke's avatar
Guolin Ke committed
901
  std::vector<std::unique_ptr<FeatureHistogram[]>> pool_;
902
  std::vector<std::vector<hist_t, Common::AlignmentAllocator<hist_t, kAlignedSize>>> data_;
Guolin Ke's avatar
Guolin Ke committed
903
  std::vector<FeatureMetainfo> feature_metas_;
Guolin Ke's avatar
Guolin Ke committed
904
905
906
907
908
909
910
911
912
  int cache_size_;
  int total_size_;
  bool is_enough_ = false;
  std::vector<int> mapper_;
  std::vector<int> inverse_mapper_;
  std::vector<int> last_used_time_;
  int cur_time_ = 0;
};

}  // namespace LightGBM
#endif   // LightGBM_TREELEARNER_FEATURE_HISTOGRAM_HPP_