serial_tree_learner.cpp 50.1 KB
Newer Older
1
2
3
4
/*!
 * Copyright (c) 2016 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for license information.
 */
Guolin Ke's avatar
Guolin Ke committed
5
6
#include "serial_tree_learner.h"

7
8
9
10
11
#include <LightGBM/network.h>
#include <LightGBM/objective_function.h>
#include <LightGBM/utils/array_args.h>
#include <LightGBM/utils/common.h>

12
13
#include <algorithm>
#include <queue>
14
#include <set>
15
16
17
#include <unordered_map>
#include <utility>

18
19
#include "cost_effective_gradient_boosting.hpp"

Guolin Ke's avatar
Guolin Ke committed
20
21
namespace LightGBM {

Guolin Ke's avatar
Guolin Ke committed
22
SerialTreeLearner::SerialTreeLearner(const Config* config)
    : config_(config), col_sampler_(config) {
  // The discretizer is only created in Init(), and only when
  // config_->use_quantized_grad is enabled.
  gradient_discretizer_ = nullptr;
}

// Out-of-line defaulted destructor (kept in the .cpp so unique_ptr members
// of incomplete types declared in the header can be destroyed here).
SerialTreeLearner::~SerialTreeLearner() = default;

30
void SerialTreeLearner::Init(const Dataset* train_data, bool is_constant_hessian) {
Guolin Ke's avatar
Guolin Ke committed
31
32
33
  train_data_ = train_data;
  num_data_ = train_data_->num_data();
  num_features_ = train_data_->num_features();
34
35
  int max_cache_size = 0;
  // Get the max size of pool
Guolin Ke's avatar
Guolin Ke committed
36
37
  if (config_->histogram_pool_size <= 0) {
    max_cache_size = config_->num_leaves;
38
39
40
  } else {
    size_t total_histogram_size = 0;
    for (int i = 0; i < train_data_->num_features(); ++i) {
41
      total_histogram_size += kHistEntrySize * train_data_->FeatureNumBin(i);
42
    }
Guolin Ke's avatar
Guolin Ke committed
43
    max_cache_size = static_cast<int>(config_->histogram_pool_size * 1024 * 1024 / total_histogram_size);
44
45
  }
  // at least need 2 leaves
Guolin Ke's avatar
Guolin Ke committed
46
  max_cache_size = std::max(2, max_cache_size);
Guolin Ke's avatar
Guolin Ke committed
47
  max_cache_size = std::min(max_cache_size, config_->num_leaves);
Guolin Ke's avatar
Guolin Ke committed
48

Guolin Ke's avatar
Guolin Ke committed
49
  // push split information for all leaves
Guolin Ke's avatar
Guolin Ke committed
50
  best_split_per_leaf_.resize(config_->num_leaves);
51
  constraints_.reset(LeafConstraintsBase::Create(config_, config_->num_leaves, train_data_->num_features()));
Guolin Ke's avatar
Guolin Ke committed
52

wxchan's avatar
wxchan committed
53
  // initialize splits for leaf
Guolin Ke's avatar
Guolin Ke committed
54
55
  smaller_leaf_splits_.reset(new LeafSplits(train_data_->num_data(), config_));
  larger_leaf_splits_.reset(new LeafSplits(train_data_->num_data(), config_));
Guolin Ke's avatar
Guolin Ke committed
56
57

  // initialize data partition
Guolin Ke's avatar
Guolin Ke committed
58
  data_partition_.reset(new DataPartition(num_data_, config_->num_leaves));
59
  col_sampler_.SetTrainingData(train_data_);
Guolin Ke's avatar
Guolin Ke committed
60
  // initialize ordered gradients and hessians
Guolin Ke's avatar
Guolin Ke committed
61
62
  ordered_gradients_.resize(num_data_);
  ordered_hessians_.resize(num_data_);
63

64
65
66
67
68
  if (config_->use_quantized_grad) {
    gradient_discretizer_.reset(new GradientDiscretizer(config_->num_grad_quant_bins, config_->num_iterations, config_->seed, is_constant_hessian, config_->stochastic_rounding));
    gradient_discretizer_->Init(num_data_, config_->num_leaves, num_features_, train_data_);
  }

69
  GetShareStates(train_data_, is_constant_hessian, true);
70
71
72
73
  histogram_pool_.DynamicChangeSize(train_data_,
  share_state_->num_hist_total_bin(),
  share_state_->feature_hist_offsets(),
  config_, max_cache_size, config_->num_leaves);
74
  Log::Info("Number of data points in the train set: %d, number of used features: %d", num_data_, num_features_);
75
76
77
  if (CostEfficientGradientBoosting::IsEnable(config_)) {
    cegb_.reset(new CostEfficientGradientBoosting(this));
    cegb_->Init();
78
  }
Guolin Ke's avatar
Guolin Ke committed
79
80
}

81
82
83
void SerialTreeLearner::GetShareStates(const Dataset* dataset,
                                       bool is_constant_hessian,
                                       bool is_first_time) {
84
  if (is_first_time) {
85
86
87
    if (config_->use_quantized_grad) {
      share_state_.reset(dataset->GetShareStates<true, 32>(
        reinterpret_cast<score_t*>(gradient_discretizer_->ordered_int_gradients_and_hessians()), nullptr,
88
        col_sampler_.is_feature_used_bytree(), is_constant_hessian,
89
90
91
92
93
94
95
        config_->force_col_wise, config_->force_row_wise, config_->num_grad_quant_bins));
    } else {
      share_state_.reset(dataset->GetShareStates<false, 0>(
          ordered_gradients_.data(), ordered_hessians_.data(),
          col_sampler_.is_feature_used_bytree(), is_constant_hessian,
          config_->force_col_wise, config_->force_row_wise, config_->num_grad_quant_bins));
    }
96
  } else {
Nikita Titov's avatar
Nikita Titov committed
97
    CHECK_NOTNULL(share_state_);
98
    // cannot change is_hist_col_wise during training
99
100
101
102
103
104
105
106
107
108
109
    if (config_->use_quantized_grad) {
      share_state_.reset(dataset->GetShareStates<true, 32>(
          reinterpret_cast<score_t*>(gradient_discretizer_->ordered_int_gradients_and_hessians()), nullptr,
          col_sampler_.is_feature_used_bytree(), is_constant_hessian,
          share_state_->is_col_wise, !share_state_->is_col_wise, config_->num_grad_quant_bins));
    } else {
      share_state_.reset(dataset->GetShareStates<false, 0>(
          ordered_gradients_.data(), ordered_hessians_.data(), col_sampler_.is_feature_used_bytree(),
          is_constant_hessian, share_state_->is_col_wise,
          !share_state_->is_col_wise, config_->num_grad_quant_bins));
    }
110
  }
Nikita Titov's avatar
Nikita Titov committed
111
  CHECK_NOTNULL(share_state_);
112
113
}

114
115
116
void SerialTreeLearner::ResetTrainingDataInner(const Dataset* train_data,
                                               bool is_constant_hessian,
                                               bool reset_multi_val_bin) {
Guolin Ke's avatar
Guolin Ke committed
117
118
  train_data_ = train_data;
  num_data_ = train_data_->num_data();
119
  CHECK_EQ(num_features_, train_data_->num_features());
Guolin Ke's avatar
Guolin Ke committed
120
121
122
123
124
125
126

  // initialize splits for leaf
  smaller_leaf_splits_->ResetNumData(num_data_);
  larger_leaf_splits_->ResetNumData(num_data_);

  // initialize data partition
  data_partition_->ResetNumData(num_data_);
127
  if (reset_multi_val_bin) {
128
    col_sampler_.SetTrainingData(train_data_);
129
130
    GetShareStates(train_data_, is_constant_hessian, false);
  }
131

Guolin Ke's avatar
Guolin Ke committed
132
133
134
  // initialize ordered gradients and hessians
  ordered_gradients_.resize(num_data_);
  ordered_hessians_.resize(num_data_);
135
136
137
  if (cegb_ != nullptr) {
    cegb_->Init();
  }
Guolin Ke's avatar
Guolin Ke committed
138
}
Guolin Ke's avatar
Guolin Ke committed
139

Guolin Ke's avatar
Guolin Ke committed
140
141
142
void SerialTreeLearner::ResetConfig(const Config* config) {
  if (config_->num_leaves != config->num_leaves) {
    config_ = config;
Guolin Ke's avatar
Guolin Ke committed
143
144
    int max_cache_size = 0;
    // Get the max size of pool
Guolin Ke's avatar
Guolin Ke committed
145
146
    if (config->histogram_pool_size <= 0) {
      max_cache_size = config_->num_leaves;
Guolin Ke's avatar
Guolin Ke committed
147
148
149
    } else {
      size_t total_histogram_size = 0;
      for (int i = 0; i < train_data_->num_features(); ++i) {
150
        total_histogram_size += kHistEntrySize * train_data_->FeatureNumBin(i);
Guolin Ke's avatar
Guolin Ke committed
151
      }
Guolin Ke's avatar
Guolin Ke committed
152
      max_cache_size = static_cast<int>(config_->histogram_pool_size * 1024 * 1024 / total_histogram_size);
Guolin Ke's avatar
Guolin Ke committed
153
154
155
    }
    // at least need 2 leaves
    max_cache_size = std::max(2, max_cache_size);
Guolin Ke's avatar
Guolin Ke committed
156
    max_cache_size = std::min(max_cache_size, config_->num_leaves);
157
158
159
160
    histogram_pool_.DynamicChangeSize(train_data_,
    share_state_->num_hist_total_bin(),
    share_state_->feature_hist_offsets(),
    config_, max_cache_size, config_->num_leaves);
Guolin Ke's avatar
Guolin Ke committed
161
162

    // push split information for all leaves
Guolin Ke's avatar
Guolin Ke committed
163
164
    best_split_per_leaf_.resize(config_->num_leaves);
    data_partition_->ResetLeaves(config_->num_leaves);
Guolin Ke's avatar
Guolin Ke committed
165
  } else {
Guolin Ke's avatar
Guolin Ke committed
166
    config_ = config;
Guolin Ke's avatar
Guolin Ke committed
167
  }
168
  col_sampler_.SetConfig(config_);
169
  histogram_pool_.ResetConfig(train_data_, config_);
170
  if (CostEfficientGradientBoosting::IsEnable(config_)) {
171
172
173
    if (cegb_ == nullptr) {
      cegb_.reset(new CostEfficientGradientBoosting(this));
    }
174
175
    cegb_->Init();
  }
176
  constraints_.reset(LeafConstraintsBase::Create(config_, config_->num_leaves, train_data_->num_features()));
Guolin Ke's avatar
Guolin Ke committed
177
178
}

179
Tree* SerialTreeLearner::Train(const score_t* gradients, const score_t* hessians, bool /*is_first_tree*/) {
  Common::FunctionTimer fun_timer("SerialTreeLearner::Train", global_timer);
  gradients_ = gradients;
  hessians_ = hessians;

  // Warn if the OpenMP thread count changed since the previous iteration;
  // shared state was sized for the old count.
  int num_threads = OMP_NUM_THREADS();
  if (share_state_->num_threads != num_threads && share_state_->num_threads > 0) {
    Log::Warning(
        "Detected that num_threads changed during training (from %d to %d), "
        "it may cause unexpected errors.",
        share_state_->num_threads, num_threads);
  }
  share_state_->num_threads = num_threads;

  if (config_->use_quantized_grad) {
    gradient_discretizer_->DiscretizeGradients(num_data_, gradients_, hessians_);
  }

  // some initial works before training
  BeforeTrain();

  const bool track_branch_features = !(config_->interaction_constraints_vector.empty());
  auto tree = std::unique_ptr<Tree>(new Tree(config_->num_leaves, track_branch_features, false));
  auto tree_ptr = tree.get();
  constraints_->ShareTreePointer(tree_ptr);

  // set the root value by hand, as it is not handled by splits
  tree->SetLeafOutput(0, FeatureHistogram::CalculateSplittedLeafOutput<true, true, true, false>(
    smaller_leaf_splits_->sum_gradients(), smaller_leaf_splits_->sum_hessians(),
    config_->lambda_l1, config_->lambda_l2, config_->max_delta_step,
    BasicConstraint(), config_->path_smooth, static_cast<data_size_t>(num_data_), 0));

  int left_leaf = 0;    // root leaf
  int cur_depth = 1;
  int right_leaf = -1;  // only root leaf can be splitted on first time

  // Apply user-forced splits first; they count against the leaf budget.
  int init_splits = ForceSplits(tree_ptr, &left_leaf, &right_leaf, &cur_depth);

  for (int split = init_splits; split < config_->num_leaves - 1; ++split) {
    // some initial works before finding best split
    if (BeforeFindBestSplit(tree_ptr, left_leaf, right_leaf)) {
      // find best threshold for every feature
      FindBestSplits(tree_ptr);
    }
    // Get a leaf with max split gain
    int best_leaf = static_cast<int>(ArrayArgs<SplitInfo>::ArgMax(best_split_per_leaf_));
    // Get split information for best leaf
    const SplitInfo& best_leaf_SplitInfo = best_split_per_leaf_[best_leaf];
    // cannot split, quit
    if (best_leaf_SplitInfo.gain <= 0.0) {
      Log::Warning("No further splits with positive gain, best gain: %f", best_leaf_SplitInfo.gain);
      break;
    }
    // split tree with best leaf
    Split(tree_ptr, best_leaf, &left_leaf, &right_leaf);
    cur_depth = std::max(cur_depth, tree->leaf_depth(left_leaf));
  }

  // With quantized gradients, optionally recompute exact leaf outputs from
  // the original (non-discretized) gradients/hessians.
  if (config_->use_quantized_grad && config_->quant_train_renew_leaf) {
    gradient_discretizer_->RenewIntGradTreeOutput(tree.get(), config_, data_partition_.get(), gradients_, hessians_,
      [this] (int leaf_index) { return GetGlobalDataCountInLeaf(leaf_index); });
  }

  Log::Debug("Trained a tree with leaves = %d and depth = %d", tree->num_leaves(), cur_depth);
  return tree.release();
}

247
Tree* SerialTreeLearner::FitByExistingTree(const Tree* old_tree, const score_t* gradients, const score_t* hessians) const {
  // Refit leaf outputs of an existing tree structure against new
  // gradients/hessians, blending old and new outputs by refit_decay_rate.
  auto tree = std::unique_ptr<Tree>(new Tree(*old_tree));
  CHECK_GE(data_partition_->num_leaves(), tree->num_leaves());
  OMP_INIT_EX();
  #pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(static)
  for (int i = 0; i < tree->num_leaves(); ++i) {
    OMP_LOOP_EX_BEGIN();
    // Accumulate gradient/hessian sums over the rows assigned to leaf i.
    data_size_t cnt_leaf_data = 0;
    auto tmp_idx = data_partition_->GetIndexOnLeaf(i, &cnt_leaf_data);
    double sum_grad = 0.0f;
    double sum_hess = kEpsilon;  // seed with epsilon so empty leaves don't divide by zero
    for (data_size_t j = 0; j < cnt_leaf_data; ++j) {
      auto idx = tmp_idx[j];
      sum_grad += gradients[idx];
      sum_hess += hessians[idx];
    }
    double output;
    // Fix: use logical && instead of bitwise & (identical for bool operands,
    // but && expresses the intent and short-circuits).
    // Path smoothing needs the parent leaf, which the root (i == 0) lacks.
    if ((config_->path_smooth > kEpsilon) && (i > 0)) {
      output = FeatureHistogram::CalculateSplittedLeafOutput<true, true, true>(
          sum_grad, sum_hess, config_->lambda_l1, config_->lambda_l2,
          config_->max_delta_step, config_->path_smooth, cnt_leaf_data, tree->leaf_parent(i));
    } else {
      output = FeatureHistogram::CalculateSplittedLeafOutput<true, true, false>(
          sum_grad, sum_hess, config_->lambda_l1, config_->lambda_l2,
          config_->max_delta_step, config_->path_smooth, cnt_leaf_data, 0);
    }
    // Blend the previous leaf output with the freshly fitted (shrunk) one.
    auto old_leaf_output = tree->LeafOutput(i);
    auto new_leaf_output = output * tree->shrinkage();
    tree->SetLeafOutput(i, config_->refit_decay_rate * old_leaf_output + (1.0 - config_->refit_decay_rate) * new_leaf_output);
    OMP_LOOP_EX_END();
  }
  OMP_THROW_EX();
  return tree.release();
}

282
283
Tree* SerialTreeLearner::FitByExistingTree(const Tree* old_tree, const std::vector<int>& leaf_pred,
                                           const score_t* gradients, const score_t* hessians) const {
  // Rebuild the row -> leaf partition from the given per-row leaf
  // predictions, then delegate to the two-argument refit overload.
  data_partition_->ResetByLeafPred(leaf_pred, old_tree->num_leaves());
  return FitByExistingTree(old_tree, gradients, hessians);
}

Guolin Ke's avatar
Guolin Ke committed
288
void SerialTreeLearner::BeforeTrain() {
289
  Common::FunctionTimer fun_timer("SerialTreeLearner::BeforeTrain", global_timer);
290
291
  // reset histogram pool
  histogram_pool_.ResetMap();
Guolin Ke's avatar
Guolin Ke committed
292

293
294
  col_sampler_.ResetByTree();
  train_data_->InitTrain(col_sampler_.is_feature_used_bytree(), share_state_.get());
Guolin Ke's avatar
Guolin Ke committed
295
296
297
  // initialize data partition
  data_partition_->Init();

298
299
  constraints_->Reset();

Guolin Ke's avatar
Guolin Ke committed
300
  // reset the splits for leaves
Guolin Ke's avatar
Guolin Ke committed
301
  for (int i = 0; i < config_->num_leaves; ++i) {
Guolin Ke's avatar
Guolin Ke committed
302
303
304
305
306
307
    best_split_per_leaf_[i].Reset();
  }

  // Sumup for root
  if (data_partition_->leaf_count(0) == num_data_) {
    // use all data
308
309
310
311
312
313
314
315
    if (!config_->use_quantized_grad) {
      smaller_leaf_splits_->Init(gradients_, hessians_);
    } else {
      smaller_leaf_splits_->Init(
        gradient_discretizer_->discretized_gradients_and_hessians(),
        gradient_discretizer_->grad_scale(),
        gradient_discretizer_->hess_scale());
    }
Guolin Ke's avatar
Guolin Ke committed
316
317
  } else {
    // use bagging, only use part of data
318
319
320
321
322
323
    if (!config_->use_quantized_grad) {
      smaller_leaf_splits_->Init(0, data_partition_.get(), gradients_, hessians_);
    } else {
      smaller_leaf_splits_->Init(
        0, data_partition_.get(),
        gradient_discretizer_->discretized_gradients_and_hessians(),
324
325
        static_cast<score_t>(gradient_discretizer_->grad_scale()),
        static_cast<score_t>(gradient_discretizer_->hess_scale()));
326
    }
Guolin Ke's avatar
Guolin Ke committed
327
328
329
  }

  larger_leaf_splits_->Init();
330
331
332
333

  if (cegb_ != nullptr) {
    cegb_->BeforeTrain();
  }
334
335
336
337

  if (config_->use_quantized_grad && config_->tree_learner != std::string("data")) {
    gradient_discretizer_->SetNumBitsInHistogramBin<false>(0, -1, data_partition_->leaf_count(0), 0);
  }
Guolin Ke's avatar
Guolin Ke committed
338
339
}

Guolin Ke's avatar
Guolin Ke committed
340
bool SerialTreeLearner::BeforeFindBestSplit(const Tree* tree, int left_leaf, int right_leaf) {
341
  Common::FunctionTimer fun_timer("SerialTreeLearner::BeforeFindBestSplit", global_timer);
Guolin Ke's avatar
Guolin Ke committed
342
  // check depth of current leaf
Guolin Ke's avatar
Guolin Ke committed
343
  if (config_->max_depth > 0) {
Guolin Ke's avatar
Guolin Ke committed
344
    // only need to check left leaf, since right leaf is in same level of left leaf
Guolin Ke's avatar
Guolin Ke committed
345
    if (tree->leaf_depth(left_leaf) >= config_->max_depth) {
Guolin Ke's avatar
Guolin Ke committed
346
347
348
349
350
351
352
      best_split_per_leaf_[left_leaf].gain = kMinScore;
      if (right_leaf >= 0) {
        best_split_per_leaf_[right_leaf].gain = kMinScore;
      }
      return false;
    }
  }
Guolin Ke's avatar
Guolin Ke committed
353
354
355
  data_size_t num_data_in_left_child = GetGlobalDataCountInLeaf(left_leaf);
  data_size_t num_data_in_right_child = GetGlobalDataCountInLeaf(right_leaf);
  // no enough data to continue
Guolin Ke's avatar
Guolin Ke committed
356
357
  if (num_data_in_right_child < static_cast<data_size_t>(config_->min_data_in_leaf * 2)
      && num_data_in_left_child < static_cast<data_size_t>(config_->min_data_in_leaf * 2)) {
Guolin Ke's avatar
Guolin Ke committed
358
359
360
361
362
363
    best_split_per_leaf_[left_leaf].gain = kMinScore;
    if (right_leaf >= 0) {
      best_split_per_leaf_[right_leaf].gain = kMinScore;
    }
    return false;
  }
364
  parent_leaf_histogram_array_ = nullptr;
Guolin Ke's avatar
Guolin Ke committed
365
366
  // only have root
  if (right_leaf < 0) {
367
    histogram_pool_.Get(left_leaf, &smaller_leaf_histogram_array_);
Guolin Ke's avatar
Guolin Ke committed
368
369
    larger_leaf_histogram_array_ = nullptr;
  } else if (num_data_in_left_child < num_data_in_right_child) {
Hui Xue's avatar
Hui Xue committed
370
    // put parent(left) leaf's histograms into larger leaf's histograms
371
372
373
    if (histogram_pool_.Get(left_leaf, &larger_leaf_histogram_array_)) {
      parent_leaf_histogram_array_ = larger_leaf_histogram_array_;
    }
374
375
    histogram_pool_.Move(left_leaf, right_leaf);
    histogram_pool_.Get(left_leaf, &smaller_leaf_histogram_array_);
Guolin Ke's avatar
Guolin Ke committed
376
  } else {
Hui Xue's avatar
Hui Xue committed
377
    // put parent(left) leaf's histograms to larger leaf's histograms
378
379
380
    if (histogram_pool_.Get(left_leaf, &larger_leaf_histogram_array_)) {
      parent_leaf_histogram_array_ = larger_leaf_histogram_array_;
    }
381
    histogram_pool_.Get(right_leaf, &smaller_leaf_histogram_array_);
Guolin Ke's avatar
Guolin Ke committed
382
383
384
385
  }
  return true;
}

386
void SerialTreeLearner::FindBestSplits(const Tree* tree) {
387
388
389
390
  FindBestSplits(tree, nullptr);
}

void SerialTreeLearner::FindBestSplits(const Tree* tree, const std::set<int>* force_features) {
Guolin Ke's avatar
Guolin Ke committed
391
  std::vector<int8_t> is_feature_used(num_features_, 0);
392
  #pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(static, 256) if (num_features_ >= 512)
Guolin Ke's avatar
Guolin Ke committed
393
  for (int feature_index = 0; feature_index < num_features_; ++feature_index) {
394
    if (!col_sampler_.is_feature_used_bytree()[feature_index] && (force_features == nullptr || force_features->find(feature_index) == force_features->end())) continue;
Guolin Ke's avatar
Guolin Ke committed
395
396
397
398
399
400
401
402
    if (parent_leaf_histogram_array_ != nullptr
        && !parent_leaf_histogram_array_[feature_index].is_splittable()) {
      smaller_leaf_histogram_array_[feature_index].set_is_splittable(false);
      continue;
    }
    is_feature_used[feature_index] = 1;
  }
  bool use_subtract = parent_leaf_histogram_array_ != nullptr;
403

Guolin Ke's avatar
Guolin Ke committed
404
  ConstructHistograms(is_feature_used, use_subtract);
405
  FindBestSplitsFromHistograms(is_feature_used, use_subtract, tree);
Guolin Ke's avatar
Guolin Ke committed
406
407
}

408
409
410
411
void SerialTreeLearner::ConstructHistograms(
    const std::vector<int8_t>& is_feature_used, bool use_subtract) {
  Common::FunctionTimer fun_timer("SerialTreeLearner::ConstructHistograms",
                                  global_timer);
Guolin Ke's avatar
Guolin Ke committed
412
  // construct smaller leaf
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
  if (config_->use_quantized_grad) {
    const uint8_t smaller_leaf_num_bits = gradient_discretizer_->GetHistBitsInLeaf<false>(smaller_leaf_splits_->leaf_index());
    hist_t* ptr_smaller_leaf_hist_data =
        smaller_leaf_num_bits <= 16 ?
        reinterpret_cast<hist_t*>(smaller_leaf_histogram_array_[0].RawDataInt16() - kHistOffset) :
        reinterpret_cast<hist_t*>(smaller_leaf_histogram_array_[0].RawDataInt32() - kHistOffset);
    #define SMALLER_LEAF_ARGS \
      is_feature_used, smaller_leaf_splits_->data_indices(), \
      smaller_leaf_splits_->num_data_in_leaf(), \
      reinterpret_cast<const score_t*>(gradient_discretizer_->discretized_gradients_and_hessians()), \
      nullptr, \
      reinterpret_cast<score_t*>(gradient_discretizer_->ordered_int_gradients_and_hessians()), \
      nullptr, \
      share_state_.get(), \
      reinterpret_cast<hist_t*>(ptr_smaller_leaf_hist_data)
    if (smaller_leaf_num_bits <= 16) {
      train_data_->ConstructHistograms<true, 16>(SMALLER_LEAF_ARGS);
    } else {
      train_data_->ConstructHistograms<true, 32>(SMALLER_LEAF_ARGS);
    }
    #undef SMALLER_LEAF_ARGS
    if (larger_leaf_histogram_array_ && !use_subtract) {
      const uint8_t larger_leaf_num_bits = gradient_discretizer_->GetHistBitsInLeaf<false>(larger_leaf_splits_->leaf_index());
      hist_t* ptr_larger_leaf_hist_data =
        larger_leaf_num_bits <= 16 ?
        reinterpret_cast<hist_t*>(larger_leaf_histogram_array_[0].RawDataInt16() - kHistOffset) :
        reinterpret_cast<hist_t*>(larger_leaf_histogram_array_[0].RawDataInt32() - kHistOffset);
      #define LARGER_LEAF_ARGS \
        is_feature_used, larger_leaf_splits_->data_indices(), \
        larger_leaf_splits_->num_data_in_leaf(), \
        reinterpret_cast<const score_t*>(gradient_discretizer_->discretized_gradients_and_hessians()), \
        nullptr, \
        reinterpret_cast<score_t*>(gradient_discretizer_->ordered_int_gradients_and_hessians()), \
        nullptr, \
        share_state_.get(), \
        reinterpret_cast<hist_t*>(ptr_larger_leaf_hist_data)
      if (larger_leaf_num_bits <= 16) {
        train_data_->ConstructHistograms<true, 16>(LARGER_LEAF_ARGS);
      } else {
        train_data_->ConstructHistograms<true, 32>(LARGER_LEAF_ARGS);
      }
      #undef LARGER_LEAF_ARGS
    }
  } else {
    hist_t* ptr_smaller_leaf_hist_data =
        smaller_leaf_histogram_array_[0].RawData() - kHistOffset;
    train_data_->ConstructHistograms<false, 0>(
        is_feature_used, smaller_leaf_splits_->data_indices(),
        smaller_leaf_splits_->num_data_in_leaf(), gradients_, hessians_,
462
        ordered_gradients_.data(), ordered_hessians_.data(), share_state_.get(),
463
464
465
466
467
468
469
470
471
472
473
        ptr_smaller_leaf_hist_data);
    if (larger_leaf_histogram_array_ != nullptr && !use_subtract) {
      // construct larger leaf
      hist_t* ptr_larger_leaf_hist_data =
          larger_leaf_histogram_array_[0].RawData() - kHistOffset;
      train_data_->ConstructHistograms<false, 0>(
          is_feature_used, larger_leaf_splits_->data_indices(),
          larger_leaf_splits_->num_data_in_leaf(), gradients_, hessians_,
          ordered_gradients_.data(), ordered_hessians_.data(), share_state_.get(),
          ptr_larger_leaf_hist_data);
    }
Guolin Ke's avatar
Guolin Ke committed
474
  }
475
476
}

Guolin Ke's avatar
Guolin Ke committed
477
void SerialTreeLearner::FindBestSplitsFromHistograms(
478
    const std::vector<int8_t>& is_feature_used, bool use_subtract, const Tree* tree) {
Guolin Ke's avatar
Guolin Ke committed
479
480
  Common::FunctionTimer fun_timer(
      "SerialTreeLearner::FindBestSplitsFromHistograms", global_timer);
481
482
  std::vector<SplitInfo> smaller_best(share_state_->num_threads);
  std::vector<SplitInfo> larger_best(share_state_->num_threads);
483
484
  std::vector<int8_t> smaller_node_used_features = col_sampler_.GetByNode(tree, smaller_leaf_splits_->leaf_index());
  std::vector<int8_t> larger_node_used_features;
485
486
487
488
489
  double smaller_leaf_parent_output = GetParentOutput(tree, smaller_leaf_splits_.get());
  double larger_leaf_parent_output = 0;
  if (larger_leaf_splits_ != nullptr && larger_leaf_splits_->leaf_index() >= 0) {
    larger_leaf_parent_output = GetParentOutput(tree, larger_leaf_splits_.get());
  }
490
491
492
  if (larger_leaf_splits_->leaf_index() >= 0) {
    larger_node_used_features = col_sampler_.GetByNode(tree, larger_leaf_splits_->leaf_index());
  }
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512

  if (use_subtract && config_->use_quantized_grad) {
    const int parent_index = std::min(smaller_leaf_splits_->leaf_index(), larger_leaf_splits_->leaf_index());
    const uint8_t parent_hist_bits = gradient_discretizer_->GetHistBitsInNode<false>(parent_index);
    const uint8_t larger_hist_bits = gradient_discretizer_->GetHistBitsInLeaf<false>(larger_leaf_splits_->leaf_index());
    if (parent_hist_bits > 16 && larger_hist_bits <= 16) {
      OMP_INIT_EX();
      #pragma omp parallel for schedule(static) num_threads(share_state_->num_threads)
      for (int feature_index = 0; feature_index < num_features_; ++feature_index) {
        OMP_LOOP_EX_BEGIN();
        if (!is_feature_used[feature_index]) {
          continue;
        }
        larger_leaf_histogram_array_[feature_index].CopyToBuffer(gradient_discretizer_->GetChangeHistBitsBuffer(feature_index));
        OMP_LOOP_EX_END();
      }
      OMP_THROW_EX();
    }
  }

513
  OMP_INIT_EX();
Guolin Ke's avatar
Guolin Ke committed
514
// find splits
515
#pragma omp parallel for schedule(static) num_threads(share_state_->num_threads)
Guolin Ke's avatar
Guolin Ke committed
516
  for (int feature_index = 0; feature_index < num_features_; ++feature_index) {
517
    OMP_LOOP_EX_BEGIN();
Guolin Ke's avatar
Guolin Ke committed
518
519
520
    if (!is_feature_used[feature_index]) {
      continue;
    }
Guolin Ke's avatar
Guolin Ke committed
521
    const int tid = omp_get_thread_num();
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
    if (config_->use_quantized_grad) {
      const uint8_t hist_bits_bin = gradient_discretizer_->GetHistBitsInLeaf<false>(smaller_leaf_splits_->leaf_index());
      const int64_t int_sum_gradient_and_hessian = smaller_leaf_splits_->int_sum_gradients_and_hessians();
      if (hist_bits_bin <= 16) {
        train_data_->FixHistogramInt<int32_t, int32_t, 16, 16>(
            feature_index, int_sum_gradient_and_hessian,
            reinterpret_cast<hist_t*>(smaller_leaf_histogram_array_[feature_index].RawDataInt16()));
      } else {
        train_data_->FixHistogramInt<int64_t, int64_t, 32, 32>(
            feature_index, int_sum_gradient_and_hessian,
            reinterpret_cast<hist_t*>(smaller_leaf_histogram_array_[feature_index].RawDataInt32()));
      }
    } else {
      train_data_->FixHistogram(
          feature_index, smaller_leaf_splits_->sum_gradients(),
          smaller_leaf_splits_->sum_hessians(),
          smaller_leaf_histogram_array_[feature_index].RawData());
    }
540
    int real_fidx = train_data_->RealFeatureIndex(feature_index);
541
542
543
544
545

    ComputeBestSplitForFeature(smaller_leaf_histogram_array_, feature_index,
                               real_fidx,
                               smaller_node_used_features[feature_index],
                               smaller_leaf_splits_->num_data_in_leaf(),
546
547
                               smaller_leaf_splits_.get(), &smaller_best[tid],
                               smaller_leaf_parent_output);
548

Guolin Ke's avatar
Guolin Ke committed
549
    // only has root leaf
Guolin Ke's avatar
Guolin Ke committed
550
551
552
553
    if (larger_leaf_splits_ == nullptr ||
        larger_leaf_splits_->leaf_index() < 0) {
      continue;
    }
Guolin Ke's avatar
Guolin Ke committed
554

Guolin Ke's avatar
Guolin Ke committed
555
    if (use_subtract) {
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
      if (config_->use_quantized_grad) {
        const int parent_index = std::min(smaller_leaf_splits_->leaf_index(), larger_leaf_splits_->leaf_index());
        const uint8_t parent_hist_bits = gradient_discretizer_->GetHistBitsInNode<false>(parent_index);
        const uint8_t smaller_hist_bits = gradient_discretizer_->GetHistBitsInLeaf<false>(smaller_leaf_splits_->leaf_index());
        const uint8_t larger_hist_bits = gradient_discretizer_->GetHistBitsInLeaf<false>(larger_leaf_splits_->leaf_index());
        if (parent_hist_bits <= 16) {
          CHECK_LE(smaller_hist_bits, 16);
          CHECK_LE(larger_hist_bits, 16);
          larger_leaf_histogram_array_[feature_index].Subtract<true, int32_t, int32_t, int32_t, 16, 16, 16>(
              smaller_leaf_histogram_array_[feature_index]);
        } else if (larger_hist_bits <= 16) {
          CHECK_LE(smaller_hist_bits, 16);
          larger_leaf_histogram_array_[feature_index].Subtract<true, int64_t, int32_t, int32_t, 32, 16, 16>(
              smaller_leaf_histogram_array_[feature_index], gradient_discretizer_->GetChangeHistBitsBuffer(feature_index));
        } else if (smaller_hist_bits <= 16) {
          larger_leaf_histogram_array_[feature_index].Subtract<true, int64_t, int32_t, int64_t, 32, 16, 32>(
              smaller_leaf_histogram_array_[feature_index]);
        } else {
          larger_leaf_histogram_array_[feature_index].Subtract<true, int64_t, int64_t, int64_t, 32, 32, 32>(
              smaller_leaf_histogram_array_[feature_index]);
        }
      } else {
        larger_leaf_histogram_array_[feature_index].Subtract<false>(
            smaller_leaf_histogram_array_[feature_index]);
      }
581
    } else {
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
      if (config_->use_quantized_grad) {
        const int64_t int_sum_gradient_and_hessian = larger_leaf_splits_->int_sum_gradients_and_hessians();
        const uint8_t hist_bits_bin = gradient_discretizer_->GetHistBitsInLeaf<false>(larger_leaf_splits_->leaf_index());
        if (hist_bits_bin <= 16) {
          train_data_->FixHistogramInt<int32_t, int32_t, 16, 16>(
              feature_index, int_sum_gradient_and_hessian,
              reinterpret_cast<hist_t*>(larger_leaf_histogram_array_[feature_index].RawDataInt16()));
        } else {
          train_data_->FixHistogramInt<int64_t, int64_t, 32, 32>(
              feature_index, int_sum_gradient_and_hessian,
              reinterpret_cast<hist_t*>(larger_leaf_histogram_array_[feature_index].RawDataInt32()));
        }
      } else {
        train_data_->FixHistogram(
            feature_index, larger_leaf_splits_->sum_gradients(),
            larger_leaf_splits_->sum_hessians(),
            larger_leaf_histogram_array_[feature_index].RawData());
      }
600
    }
601
602
603
604
605

    ComputeBestSplitForFeature(larger_leaf_histogram_array_, feature_index,
                               real_fidx,
                               larger_node_used_features[feature_index],
                               larger_leaf_splits_->num_data_in_leaf(),
606
607
                               larger_leaf_splits_.get(), &larger_best[tid],
                               larger_leaf_parent_output);
608

609
    OMP_LOOP_EX_END();
Guolin Ke's avatar
Guolin Ke committed
610
  }
611
  OMP_THROW_EX();
Guolin Ke's avatar
Guolin Ke committed
612
  auto smaller_best_idx = ArrayArgs<SplitInfo>::ArgMax(smaller_best);
613
  int leaf = smaller_leaf_splits_->leaf_index();
Guolin Ke's avatar
Guolin Ke committed
614
615
  best_split_per_leaf_[leaf] = smaller_best[smaller_best_idx];

Guolin Ke's avatar
Guolin Ke committed
616
617
  if (larger_leaf_splits_ != nullptr &&
      larger_leaf_splits_->leaf_index() >= 0) {
618
    leaf = larger_leaf_splits_->leaf_index();
Guolin Ke's avatar
Guolin Ke committed
619
620
621
622
623
    auto larger_best_idx = ArrayArgs<SplitInfo>::ArgMax(larger_best);
    best_split_per_leaf_[leaf] = larger_best[larger_best_idx];
  }
}

624
625
626
627
628
629
/*!
 * \brief Apply the user-provided forced splits (``forced_split_json_``) to the
 *        tree in BFS order before the normal best-split search takes over.
 * \param tree tree being grown
 * \param left_leaf output: index of the left child of the last performed split
 * \param right_leaf output: index of the right child of the last performed split
 * \param cur_depth in/out: updated with the depth reached by the forced splits
 * \return number of splits performed (``config_->num_leaves`` if the search
 *         must stop because no split with positive gain remains)
 */
int32_t SerialTreeLearner::ForceSplits(Tree* tree, int* left_leaf,
                                       int* right_leaf, int *cur_depth) {
  bool abort_last_forced_split = false;
  if (forced_split_json_ == nullptr) {
    return 0;
  }
  int32_t result_count = 0;
  // start at root leaf
  *left_leaf = 0;
  std::queue<std::pair<Json, int>> q;
  Json left = *forced_split_json_;
  Json right;
  bool left_smaller = true;
  std::unordered_map<int, SplitInfo> forceSplitMap;
  q.push(std::make_pair(left, *left_leaf));
  // Histogram construction require parent features.
  std::set<int> force_split_features = FindAllForceFeatures(*forced_split_json_);
  while (!q.empty()) {
    // build the histograms needed to evaluate the forced thresholds
    if (BeforeFindBestSplit(tree, *left_leaf, *right_leaf)) {
      FindBestSplits(tree, &force_split_features);
    }
    // then, compute own splits
    SplitInfo left_split;
    SplitInfo right_split;

    if (!left.is_null()) {
      const int left_feature = left["feature"].int_value();
      const double left_threshold_double = left["threshold"].number_value();
      const int left_inner_feature_index = train_data_->InnerFeatureIndex(left_feature);
      const uint32_t left_threshold = train_data_->BinThreshold(
              left_inner_feature_index, left_threshold_double);
      auto leaf_histogram_array = (left_smaller) ? smaller_leaf_histogram_array_ : larger_leaf_histogram_array_;
      auto left_leaf_splits = (left_smaller) ? smaller_leaf_splits_.get() : larger_leaf_splits_.get();
      leaf_histogram_array[left_inner_feature_index].GatherInfoForThreshold(
              left_leaf_splits->sum_gradients(),
              left_leaf_splits->sum_hessians(),
              left_threshold,
              left_leaf_splits->num_data_in_leaf(),
              left_leaf_splits->weight(),
              &left_split);
      left_split.feature = left_feature;
      forceSplitMap[*left_leaf] = left_split;
      // drop the entry again if the forced threshold yields no positive gain
      if (left_split.gain < 0) {
        forceSplitMap.erase(*left_leaf);
      }
    }

    if (!right.is_null()) {
      const int right_feature = right["feature"].int_value();
      const double right_threshold_double = right["threshold"].number_value();
      const int right_inner_feature_index = train_data_->InnerFeatureIndex(right_feature);
      const uint32_t right_threshold = train_data_->BinThreshold(
              right_inner_feature_index, right_threshold_double);
      auto leaf_histogram_array = (left_smaller) ? larger_leaf_histogram_array_ : smaller_leaf_histogram_array_;
      auto right_leaf_splits = (left_smaller) ? larger_leaf_splits_.get() : smaller_leaf_splits_.get();
      leaf_histogram_array[right_inner_feature_index].GatherInfoForThreshold(
        right_leaf_splits->sum_gradients(),
        right_leaf_splits->sum_hessians(),
        right_threshold,
        right_leaf_splits->num_data_in_leaf(),
        right_leaf_splits->weight(),
        &right_split);
      right_split.feature = right_feature;
      forceSplitMap[*right_leaf] = right_split;
      if (right_split.gain < 0) {
        forceSplitMap.erase(*right_leaf);
      }
    }

    std::pair<Json, int> pair = q.front();
    q.pop();
    int current_leaf = pair.second;
    // split info should exist because searching in bfs fashion - should have added from parent
    // (single lookup instead of find + two operator[] accesses)
    auto split_it = forceSplitMap.find(current_leaf);
    if (split_it == forceSplitMap.end()) {
        abort_last_forced_split = true;
        break;
    }
    best_split_per_leaf_[current_leaf] = split_it->second;
    Split(tree, current_leaf, left_leaf, right_leaf);
    left_smaller = best_split_per_leaf_[current_leaf].left_count <
                   best_split_per_leaf_[current_leaf].right_count;
    // enqueue the JSON children (if any) for the two new leaves
    left = Json();
    right = Json();
    if ((pair.first).object_items().count("left") > 0) {
      left = (pair.first)["left"];
      if (left.object_items().count("feature") > 0 && left.object_items().count("threshold") > 0) {
        q.push(std::make_pair(left, *left_leaf));
      }
    }
    if ((pair.first).object_items().count("right") > 0) {
      right = (pair.first)["right"];
      if (right.object_items().count("feature") > 0 && right.object_items().count("threshold") > 0) {
        q.push(std::make_pair(right, *right_leaf));
      }
    }
    result_count++;
    *(cur_depth) = std::max(*(cur_depth), tree->leaf_depth(*left_leaf));
  }
  // A forced split had no recorded SplitInfo: fall back to the globally best
  // split over all leaves for this one step.
  if (abort_last_forced_split) {
    int best_leaf =
        static_cast<int>(ArrayArgs<SplitInfo>::ArgMax(best_split_per_leaf_));
    const SplitInfo& best_leaf_SplitInfo = best_split_per_leaf_[best_leaf];
    if (best_leaf_SplitInfo.gain <= 0.0) {
      Log::Warning("No further splits with positive gain, best gain: %f",
                   best_leaf_SplitInfo.gain);
      return config_->num_leaves;
    }
    Split(tree, best_leaf, left_leaf, right_leaf);
    *(cur_depth) = std::max(*(cur_depth), tree->leaf_depth(*left_leaf));
    ++result_count;
  }
  return result_count;
}
Guolin Ke's avatar
Guolin Ke committed
739

740
741
/*!
 * \brief Collect the inner feature indices referenced anywhere in the forced
 *        split specification, so their histograms can be built up front.
 * \param force_split_leaf_setting root of the forced-split JSON tree
 * \return set of inner (dataset) feature indices used by the forced splits
 */
std::set<int> SerialTreeLearner::FindAllForceFeatures(Json force_split_leaf_setting) {
  std::set<int> force_features;
  // breadth-first traversal of the JSON split tree
  std::queue<Json> pending;
  pending.push(force_split_leaf_setting);

  while (!pending.empty()) {
    const Json node = pending.front();
    pending.pop();

    // map the user-facing feature index to the dataset's inner index
    const int raw_feature = node["feature"].int_value();
    force_features.insert(train_data_->InnerFeatureIndex(raw_feature));

    const auto& items = node.object_items();
    if (items.count("left") > 0) {
      pending.push(node["left"]);
    }
    if (items.count("right") > 0) {
      pending.push(node["right"]);
    }
  }

  return force_features;
}

766
767
768
/*!
 * \brief Perform the split recorded in ``best_split_per_leaf_[best_leaf]``:
 *        partition the data rows, add the split to the tree (numerical or
 *        categorical), and re-initialize the smaller/larger leaf-split state
 *        used by the next iteration of the best-split search.
 * \param tree tree being grown (modified in place)
 * \param best_leaf index of the leaf to split
 * \param left_leaf output: leaf index of the left child (reuses ``best_leaf``)
 * \param right_leaf output: leaf index of the newly created right child
 * \param update_cnt if true, refresh left/right data counts from the data
 *        partition (skipped in the data-parallel learner)
 */
void SerialTreeLearner::SplitInner(Tree* tree, int best_leaf, int* left_leaf,
                                   int* right_leaf, bool update_cnt) {
  Common::FunctionTimer fun_timer("SerialTreeLearner::SplitInner", global_timer);
  SplitInfo& best_split_info = best_split_per_leaf_[best_leaf];
  const int inner_feature_index =
      train_data_->InnerFeatureIndex(best_split_info.feature);
  if (cegb_ != nullptr) {
    // cost-effective gradient boosting bookkeeping for the chosen split
    cegb_->UpdateLeafBestSplits(tree, best_leaf, &best_split_info,
                                &best_split_per_leaf_);
  }
  *left_leaf = best_leaf;
  auto next_leaf_id = tree->NextLeafId();

  // update before tree split
  constraints_->BeforeSplit(best_leaf, next_leaf_id,
                            best_split_info.monotone_type);

  bool is_numerical_split =
      train_data_->FeatureBinMapper(inner_feature_index)->bin_type() ==
      BinType::NumericalBin;
  if (is_numerical_split) {
    auto threshold_double = train_data_->RealThreshold(
        inner_feature_index, best_split_info.threshold);
    // move data rows of best_leaf into the two children
    data_partition_->Split(best_leaf, train_data_, inner_feature_index,
                           &best_split_info.threshold, 1,
                           best_split_info.default_left, next_leaf_id);
    if (update_cnt) {
      // don't need to update this in data-based parallel model
      best_split_info.left_count = data_partition_->leaf_count(*left_leaf);
      best_split_info.right_count = data_partition_->leaf_count(next_leaf_id);
    }
    // split tree, will return right leaf
    *right_leaf = tree->Split(
        best_leaf, inner_feature_index, best_split_info.feature,
        best_split_info.threshold, threshold_double,
        static_cast<double>(best_split_info.left_output),
        static_cast<double>(best_split_info.right_output),
        static_cast<data_size_t>(best_split_info.left_count),
        static_cast<data_size_t>(best_split_info.right_count),
        static_cast<double>(best_split_info.left_sum_hessian),
        static_cast<double>(best_split_info.right_sum_hessian),
        // store the true split gain in tree model
        static_cast<float>(best_split_info.gain + config_->min_gain_to_split),
        train_data_->FeatureBinMapper(inner_feature_index)->missing_type(),
        best_split_info.default_left);
  } else {
    // categorical split: thresholds are category sets encoded as bitsets,
    // one in inner (bin) space and one in raw category space
    std::vector<uint32_t> cat_bitset_inner =
        Common::ConstructBitset(best_split_info.cat_threshold.data(),
                                best_split_info.num_cat_threshold);
    std::vector<int> threshold_int(best_split_info.num_cat_threshold);
    for (int i = 0; i < best_split_info.num_cat_threshold; ++i) {
      threshold_int[i] = static_cast<int>(train_data_->RealThreshold(
          inner_feature_index, best_split_info.cat_threshold[i]));
    }
    std::vector<uint32_t> cat_bitset = Common::ConstructBitset(
        threshold_int.data(), best_split_info.num_cat_threshold);

    data_partition_->Split(best_leaf, train_data_, inner_feature_index,
                           cat_bitset_inner.data(),
                           static_cast<int>(cat_bitset_inner.size()),
                           best_split_info.default_left, next_leaf_id);

    if (update_cnt) {
      // don't need to update this in data-based parallel model
      best_split_info.left_count = data_partition_->leaf_count(*left_leaf);
      best_split_info.right_count = data_partition_->leaf_count(next_leaf_id);
    }

    *right_leaf = tree->SplitCategorical(
        best_leaf, inner_feature_index, best_split_info.feature,
        cat_bitset_inner.data(), static_cast<int>(cat_bitset_inner.size()),
        cat_bitset.data(), static_cast<int>(cat_bitset.size()),
        static_cast<double>(best_split_info.left_output),
        static_cast<double>(best_split_info.right_output),
        static_cast<data_size_t>(best_split_info.left_count),
        static_cast<data_size_t>(best_split_info.right_count),
        static_cast<double>(best_split_info.left_sum_hessian),
        static_cast<double>(best_split_info.right_sum_hessian),
        // store the true split gain in tree model
        static_cast<float>(best_split_info.gain + config_->min_gain_to_split),
        train_data_->FeatureBinMapper(inner_feature_index)->missing_type());
  }

#ifdef DEBUG
  CHECK(*right_leaf == next_leaf_id);
#endif

  // init the leaves that used on next iteration
  // (the child with fewer rows becomes smaller_leaf_splits_ so histogram
  // subtraction can be used for the larger one)
  if (!config_->use_quantized_grad) {
    if (best_split_info.left_count < best_split_info.right_count) {
      CHECK_GT(best_split_info.left_count, 0);
      smaller_leaf_splits_->Init(*left_leaf, data_partition_.get(),
                                 best_split_info.left_sum_gradient,
                                 best_split_info.left_sum_hessian,
                                 best_split_info.left_output);
      larger_leaf_splits_->Init(*right_leaf, data_partition_.get(),
                                best_split_info.right_sum_gradient,
                                best_split_info.right_sum_hessian,
                                best_split_info.right_output);
    } else {
      CHECK_GT(best_split_info.right_count, 0);
      smaller_leaf_splits_->Init(*right_leaf, data_partition_.get(),
                                 best_split_info.right_sum_gradient,
                                 best_split_info.right_sum_hessian,
                                 best_split_info.right_output);
      larger_leaf_splits_->Init(*left_leaf, data_partition_.get(),
                                best_split_info.left_sum_gradient,
                                best_split_info.left_sum_hessian,
                                best_split_info.left_output);
    }
  } else {
    // quantized-gradient mode also carries the packed int gradient/hessian sums
    if (best_split_info.left_count < best_split_info.right_count) {
      CHECK_GT(best_split_info.left_count, 0);
      smaller_leaf_splits_->Init(*left_leaf, data_partition_.get(),
                                 best_split_info.left_sum_gradient,
                                 best_split_info.left_sum_hessian,
                                 best_split_info.left_sum_gradient_and_hessian,
                                 best_split_info.left_output);
      larger_leaf_splits_->Init(*right_leaf, data_partition_.get(),
                                 best_split_info.right_sum_gradient,
                                 best_split_info.right_sum_hessian,
                                 best_split_info.right_sum_gradient_and_hessian,
                                 best_split_info.right_output);
    } else {
      CHECK_GT(best_split_info.right_count, 0);
      smaller_leaf_splits_->Init(*right_leaf, data_partition_.get(),
                                 best_split_info.right_sum_gradient,
                                 best_split_info.right_sum_hessian,
                                 best_split_info.right_sum_gradient_and_hessian,
                                 best_split_info.right_output);
      larger_leaf_splits_->Init(*left_leaf, data_partition_.get(),
                                best_split_info.left_sum_gradient,
                                best_split_info.left_sum_hessian,
                                best_split_info.left_sum_gradient_and_hessian,
                                best_split_info.left_output);
    }
  }
  if (config_->use_quantized_grad && config_->tree_learner != std::string("data")) {
    // choose 16- vs 32-bit histogram bins for the two new leaves based on counts
    gradient_discretizer_->SetNumBitsInHistogramBin<false>(*left_leaf, *right_leaf,
                                                    data_partition_->leaf_count(*left_leaf),
                                                    data_partition_->leaf_count(*right_leaf));
  }

  #ifdef DEBUG
  CheckSplit(best_split_info, *left_leaf, *right_leaf);
  #endif

  auto leaves_need_update = constraints_->Update(
      is_numerical_split, *left_leaf, *right_leaf,
      best_split_info.monotone_type, best_split_info.right_output,
      best_split_info.left_output, inner_feature_index, best_split_info,
      best_split_per_leaf_);
  // update leave outputs if needed
  for (auto leaf : leaves_need_update) {
    RecomputeBestSplitForLeaf(tree, leaf, &best_split_per_leaf_[leaf]);
  }
}
Guolin Ke's avatar
Guolin Ke committed
923

924
/*!
 * \brief Let the objective function recompute each leaf's output value after
 *        the tree is built (only if the objective opts in via
 *        ``IsRenewTreeOutput``). In distributed runs, outputs are averaged
 *        over the workers that have data in each leaf.
 * \param tree tree whose leaf outputs are rewritten in place
 * \param obj objective function (may be nullptr; then this is a no-op)
 * \param residual_getter callback that maps (labels, row index) to a residual
 * \param total_num_data global row count; differs from num_data_ when bagging
 * \param bag_indices mapping from local bagged index to original row index
 * \param bag_cnt number of bagged rows (must equal num_data_ when bagging)
 */
void SerialTreeLearner::RenewTreeOutput(Tree* tree, const ObjectiveFunction* obj, std::function<double(const label_t*, int)> residual_getter,
                                        data_size_t total_num_data, const data_size_t* bag_indices, data_size_t bag_cnt, const double* /*train_score*/) const {
  if (obj != nullptr && obj->IsRenewTreeOutput()) {
    CHECK_LE(tree->num_leaves(), data_partition_->num_leaves());
    // when bagging is active, indices must be mapped back to original rows
    const data_size_t* bag_mapper = nullptr;
    if (total_num_data != num_data_) {
      CHECK_EQ(bag_cnt, num_data_);
      bag_mapper = bag_indices;
    }
    // 1 if this worker has data in the leaf, 0 otherwise (used for averaging)
    std::vector<int> n_nozeroworker_perleaf(tree->num_leaves(), 1);
    int num_machines = Network::num_machines();
    #pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(static)
    for (int i = 0; i < tree->num_leaves(); ++i) {
      const double output = static_cast<double>(tree->LeafOutput(i));
      data_size_t cnt_leaf_data = 0;
      auto index_mapper = data_partition_->GetIndexOnLeaf(i, &cnt_leaf_data);
      if (cnt_leaf_data > 0) {
        // bag_mapper[index_mapper[i]]
        const double new_output = obj->RenewTreeOutput(output, residual_getter, index_mapper, bag_mapper, cnt_leaf_data);
        tree->SetLeafOutput(i, new_output);
      } else {
        // an empty leaf can only happen in distributed training
        CHECK_GT(num_machines, 1);
        tree->SetLeafOutput(i, 0.0);
        n_nozeroworker_perleaf[i] = 0;
      }
    }
    if (num_machines > 1) {
      // average renewed outputs over the workers that contributed data
      std::vector<double> outputs(tree->num_leaves());
      for (int i = 0; i < tree->num_leaves(); ++i) {
        outputs[i] = static_cast<double>(tree->LeafOutput(i));
      }
      outputs = Network::GlobalSum(&outputs);
      n_nozeroworker_perleaf = Network::GlobalSum(&n_nozeroworker_perleaf);
      for (int i = 0; i < tree->num_leaves(); ++i) {
        tree->SetLeafOutput(i, outputs[i] / n_nozeroworker_perleaf[i]);
      }
    }
  }
}

964
965
void SerialTreeLearner::ComputeBestSplitForFeature(
    FeatureHistogram* histogram_array_, int feature_index, int real_fidx,
Guolin Ke's avatar
Guolin Ke committed
966
    int8_t is_feature_used, int num_data, const LeafSplits* leaf_splits,
967
    SplitInfo* best_split, double parent_output) {
968
969
970
971
972
973
974
  bool is_feature_numerical = train_data_->FeatureBinMapper(feature_index)
                                  ->bin_type() == BinType::NumericalBin;
  if (is_feature_numerical & !config_->monotone_constraints.empty()) {
    constraints_->RecomputeConstraintsIfNeeded(
        constraints_.get(), feature_index, ~(leaf_splits->leaf_index()),
        train_data_->FeatureNumBin(feature_index));
  }
975
  SplitInfo new_split;
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
  if (config_->use_quantized_grad) {
    const uint8_t hist_bits_bin = gradient_discretizer_->GetHistBitsInLeaf<false>(leaf_splits->leaf_index());
    histogram_array_[feature_index].FindBestThresholdInt(
        leaf_splits->int_sum_gradients_and_hessians(),
        gradient_discretizer_->grad_scale(),
        gradient_discretizer_->hess_scale(),
        hist_bits_bin,
        hist_bits_bin,
        num_data,
        constraints_->GetFeatureConstraint(leaf_splits->leaf_index(), feature_index), parent_output, &new_split);
  } else {
    histogram_array_[feature_index].FindBestThreshold(
        leaf_splits->sum_gradients(), leaf_splits->sum_hessians(), num_data,
        constraints_->GetFeatureConstraint(leaf_splits->leaf_index(), feature_index), parent_output, &new_split);
  }
991
992
993
  new_split.feature = real_fidx;
  if (cegb_ != nullptr) {
    new_split.gain -=
994
        cegb_->DeltaGain(feature_index, real_fidx, leaf_splits->leaf_index(),
995
996
                         num_data, new_split);
  }
997
998
999
1000
1001
  if (new_split.monotone_type != 0) {
    double penalty = constraints_->ComputeMonotoneSplitGainPenalty(
        leaf_splits->leaf_index(), config_->monotone_penalty);
    new_split.gain *= penalty;
  }
Guolin Ke's avatar
Guolin Ke committed
1002
1003
1004
  // it is needed to filter the features after the above code.
  // Otherwise, the `is_splittable` in `FeatureHistogram` will be wrong, and cause some features being accidentally filtered in the later nodes.
  if (new_split > *best_split && is_feature_used) {
1005
1006
1007
1008
    *best_split = new_split;
  }
}

1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
/*!
 * \brief Return the parent output used for path smoothing of a leaf's children.
 * \param tree current tree
 * \param leaf_splits split summary of the leaf being evaluated
 * \return the leaf's stored weight; for the root leaf (no parent, no smoothing
 *         applied), its own optimal output computed from its gradient sums
 */
double SerialTreeLearner::GetParentOutput(const Tree* tree, const LeafSplits* leaf_splits) const {
  if (tree->num_leaves() == 1) {
    // for root leaf the "parent" output is its own output because we don't apply any smoothing to the root
    return FeatureHistogram::CalculateSplittedLeafOutput<true, true, true, false>(
        leaf_splits->sum_gradients(), leaf_splits->sum_hessians(), config_->lambda_l1,
        config_->lambda_l2, config_->max_delta_step, BasicConstraint(),
        config_->path_smooth, static_cast<data_size_t>(leaf_splits->num_data_in_leaf()), 0);
  }
  return leaf_splits->weight();
}

1023
/*!
 * \brief Re-run the best-split search for an existing leaf using its cached
 *        histograms (needed when monotone-constraint updates invalidate a
 *        previously stored best split).
 * \param tree current tree (used for per-node column sampling)
 * \param leaf leaf index whose best split must be recomputed
 * \param split in/out: the leaf's current SplitInfo; its left/right sums and
 *        counts reconstruct the leaf totals, and it receives the new best split
 */
void SerialTreeLearner::RecomputeBestSplitForLeaf(Tree* tree, int leaf, SplitInfo* split) {
  FeatureHistogram* histogram_array_;
  // the histogram pool may have evicted this leaf's histograms; then we
  // cannot recompute and keep the stale split instead
  if (!histogram_pool_.Get(leaf, &histogram_array_)) {
    Log::Warning(
        "Get historical Histogram for leaf %d failed, will skip the "
        "``RecomputeBestSplitForLeaf``",
        leaf);
    return;
  }
  // reconstruct the leaf totals from the stored child sums
  double sum_gradients = split->left_sum_gradient + split->right_sum_gradient;
  double sum_hessians = split->left_sum_hessian + split->right_sum_hessian;
  int num_data = split->left_count + split->right_count;

  // per-thread best-split accumulators
  std::vector<SplitInfo> bests(share_state_->num_threads);
  LeafSplits leaf_splits(num_data, config_);
  leaf_splits.Init(leaf, sum_gradients, sum_hessians);

  // can't use GetParentOutput because leaf_splits doesn't have weight property set
  double parent_output = 0;
  if (config_->path_smooth > kEpsilon) {
    parent_output = FeatureHistogram::CalculateSplittedLeafOutput<true, true, true, false>(
      sum_gradients, sum_hessians, config_->lambda_l1, config_->lambda_l2, config_->max_delta_step,
      BasicConstraint(), config_->path_smooth, static_cast<data_size_t>(num_data), 0);
  }

  OMP_INIT_EX();
// find splits
std::vector<int8_t> node_used_features = col_sampler_.GetByNode(tree, leaf);
#pragma omp parallel for schedule(static) num_threads(share_state_->num_threads)
  for (int feature_index = 0; feature_index < num_features_; ++feature_index) {
    OMP_LOOP_EX_BEGIN();
    // skip features dropped by column sampling or proven unsplittable
    if (!col_sampler_.is_feature_used_bytree()[feature_index] ||
        !histogram_array_[feature_index].is_splittable()) {
      continue;
    }
    const int tid = omp_get_thread_num();
    int real_fidx = train_data_->RealFeatureIndex(feature_index);
    ComputeBestSplitForFeature(histogram_array_, feature_index, real_fidx, node_used_features[feature_index],
                               num_data, &leaf_splits, &bests[tid], parent_output);

    OMP_LOOP_EX_END();
  }
  OMP_THROW_EX();
  // reduce the per-thread bests into the leaf's SplitInfo
  auto best_idx = ArrayArgs<SplitInfo>::ArgMax(bests);
  *split = bests[best_idx];
}

1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
#ifdef DEBUG
/*!
 * \brief Debug-only consistency check after a split: re-accumulate the
 *        discretized gradients/hessians of the rows in each child and compare
 *        them (and the row counts) against the values stored in the SplitInfo.
 *        The packed layout puts the hessian at 2*i and the gradient at 2*i+1;
 *        the 64-bit packed sums keep the gradient in the high 32 bits.
 */
void SerialTreeLearner::CheckSplit(const SplitInfo& best_split_info, const int left_leaf_index, const int right_leaf_index) {
  data_size_t num_data_in_left = 0;
  data_size_t num_data_in_right = 0;
  const data_size_t* data_indices_in_left = data_partition_->GetIndexOnLeaf(left_leaf_index, &num_data_in_left);
  const data_size_t* data_indices_in_right = data_partition_->GetIndexOnLeaf(right_leaf_index, &num_data_in_right);
  if (config_->use_quantized_grad) {
    int32_t sum_left_gradient = 0;
    int32_t sum_left_hessian = 0;
    int32_t sum_right_gradient = 0;
    int32_t sum_right_hessian = 0;
    const int8_t* discretized_grad_and_hess = gradient_discretizer_->discretized_gradients_and_hessians();
    for (data_size_t i = 0; i < num_data_in_left; ++i) {
      const data_size_t index = data_indices_in_left[i];
      sum_left_gradient += discretized_grad_and_hess[2 * index + 1];
      sum_left_hessian += discretized_grad_and_hess[2 * index];
    }
    for (data_size_t i = 0; i < num_data_in_right; ++i) {
      const data_size_t index = data_indices_in_right[i];
      sum_right_gradient += discretized_grad_and_hess[2 * index + 1];
      sum_right_hessian += discretized_grad_and_hess[2 * index];
    }
    Log::Warning("============================ start leaf split info ============================");
    Log::Warning("left_leaf_index = %d, right_leaf_index = %d", left_leaf_index, right_leaf_index);
    Log::Warning("num_data_in_left = %d, num_data_in_right = %d", num_data_in_left, num_data_in_right);
    Log::Warning("sum_left_gradient = %d, best_split_info->left_sum_gradient_and_hessian.gradient = %d", sum_left_gradient,
      static_cast<int32_t>(best_split_info.left_sum_gradient_and_hessian >> 32));
    Log::Warning("sum_left_hessian = %d, best_split_info->left_sum_gradient_and_hessian.hessian = %d", sum_left_hessian,
      static_cast<int32_t>(best_split_info.left_sum_gradient_and_hessian & 0x00000000ffffffff));
    Log::Warning("sum_right_gradient = %d, best_split_info->right_sum_gradient_and_hessian.gradient = %d", sum_right_gradient,
      static_cast<int32_t>(best_split_info.right_sum_gradient_and_hessian >> 32));
    Log::Warning("sum_right_hessian = %d, best_split_info->right_sum_gradient_and_hessian.hessian = %d", sum_right_hessian,
      static_cast<int32_t>(best_split_info.right_sum_gradient_and_hessian & 0x00000000ffffffff));
    CHECK_EQ(num_data_in_left, best_split_info.left_count);
    CHECK_EQ(num_data_in_right, best_split_info.right_count);
    // fixed: this statement was missing its trailing semicolon
    CHECK_EQ(sum_left_gradient, static_cast<int32_t>(best_split_info.left_sum_gradient_and_hessian >> 32));
    CHECK_EQ(sum_left_hessian, static_cast<int32_t>(best_split_info.left_sum_gradient_and_hessian & 0x00000000ffffffff));
    CHECK_EQ(sum_right_gradient, static_cast<int32_t>(best_split_info.right_sum_gradient_and_hessian >> 32));
    CHECK_EQ(sum_right_hessian, static_cast<int32_t>(best_split_info.right_sum_gradient_and_hessian & 0x00000000ffffffff));
    Log::Warning("============================ end leaf split info ============================");
  }
}
#endif

Guolin Ke's avatar
Guolin Ke committed
1114
}  // namespace LightGBM