serial_tree_learner.cpp 44.8 KB
Newer Older
1
2
3
4
/*!
 * Copyright (c) 2016 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for license information.
 */
Guolin Ke's avatar
Guolin Ke committed
5
6
#include "serial_tree_learner.h"

7
8
9
10
11
#include <LightGBM/network.h>
#include <LightGBM/objective_function.h>
#include <LightGBM/utils/array_args.h>
#include <LightGBM/utils/common.h>

12
13
#include <algorithm>
#include <queue>
14
#include <set>
15
16
17
#include <unordered_map>
#include <utility>

18
19
#include "cost_effective_gradient_boosting.hpp"

Guolin Ke's avatar
Guolin Ke committed
20
21
namespace LightGBM {

Guolin Ke's avatar
Guolin Ke committed
22
// Construct a serial tree learner bound to a (non-owned) config.
// The gradient discretizer is left null here; it is allocated in Init()
// only when quantized-gradient training is enabled.
SerialTreeLearner::SerialTreeLearner(const Config* config)
    : config_(config), col_sampler_(config) {
  gradient_discretizer_ = nullptr;
}

// Defaulted out-of-line: equivalent to the empty-body form, but states the
// intent (no custom teardown) explicitly.
SerialTreeLearner::~SerialTreeLearner() = default;

30
// Initialize the learner for a training dataset: size the histogram pool,
// allocate per-leaf split/partition state, and build the shared histogram
// construction state.
void SerialTreeLearner::Init(const Dataset* train_data, bool is_constant_hessian) {
  train_data_ = train_data;
  num_data_ = train_data_->num_data();
  num_features_ = train_data_->num_features();
  int max_cache_size = 0;
  // Get the max size of pool
  if (config_->histogram_pool_size <= 0) {
    // Non-positive pool size means "unlimited": one cached histogram set per leaf.
    max_cache_size = config_->num_leaves;
  } else {
    // Estimate bytes for one full set of per-feature histograms, then derive
    // how many sets fit in the configured pool budget (histogram_pool_size is in MB).
    size_t total_histogram_size = 0;
    for (int i = 0; i < train_data_->num_features(); ++i) {
      total_histogram_size += kHistEntrySize * train_data_->FeatureNumBin(i);
    }
    max_cache_size = static_cast<int>(config_->histogram_pool_size * 1024 * 1024 / total_histogram_size);
  }
  // at least need 2 leaves
  max_cache_size = std::max(2, max_cache_size);
  // never need more cached sets than leaves
  max_cache_size = std::min(max_cache_size, config_->num_leaves);

  // push split information for all leaves
  best_split_per_leaf_.resize(config_->num_leaves);
  constraints_.reset(LeafConstraintsBase::Create(config_, config_->num_leaves, train_data_->num_features()));

  // initialize splits for leaf
  smaller_leaf_splits_.reset(new LeafSplits(train_data_->num_data(), config_));
  larger_leaf_splits_.reset(new LeafSplits(train_data_->num_data(), config_));

  // initialize data partition
  data_partition_.reset(new DataPartition(num_data_, config_->num_leaves));
  col_sampler_.SetTrainingData(train_data_);
  // initialize ordered gradients and hessians
  ordered_gradients_.resize(num_data_);
  ordered_hessians_.resize(num_data_);

  // Quantized-gradient training needs the discretizer allocated before
  // GetShareStates(), which reads its ordered int gradient buffer.
  if (config_->use_quantized_grad) {
    gradient_discretizer_.reset(new GradientDiscretizer(config_->num_grad_quant_bins, config_->num_iterations, config_->seed, is_constant_hessian, config_->stochastic_rounding));
    gradient_discretizer_->Init(num_data_, config_->num_leaves, num_features_, train_data_);
  }

  GetShareStates(train_data_, is_constant_hessian, true);
  histogram_pool_.DynamicChangeSize(train_data_,
  share_state_->num_hist_total_bin(),
  share_state_->feature_hist_offsets(),
  config_, max_cache_size, config_->num_leaves);
  Log::Info("Number of data points in the train set: %d, number of used features: %d", num_data_, num_features_);
  // Optional cost-effective gradient boosting component
  // (see cost_effective_gradient_boosting.hpp).
  if (CostEfficientGradientBoosting::IsEnable(config_)) {
    cegb_.reset(new CostEfficientGradientBoosting(this));
    cegb_->Init();
  }
}

81
82
83
// Build (or rebuild) the shared histogram-construction state for `dataset`.
// On the first call the col-wise/row-wise layout is chosen from the config's
// force flags; on later calls the previously chosen layout is preserved,
// since it cannot change during training.
void SerialTreeLearner::GetShareStates(const Dataset* dataset,
                                       bool is_constant_hessian,
                                       bool is_first_time) {
  if (is_first_time) {
    if (config_->use_quantized_grad) {
      // Quantized training packs int gradients and hessians into one buffer;
      // the separate hessian slot is unused (nullptr).
      share_state_.reset(dataset->GetShareStates<true, 32>(
        reinterpret_cast<score_t*>(gradient_discretizer_->ordered_int_gradients_and_hessians()), nullptr,
        col_sampler_.is_feature_used_bytree(), is_constant_hessian,
        config_->force_col_wise, config_->force_row_wise, config_->num_grad_quant_bins));
    } else {
      share_state_.reset(dataset->GetShareStates<false, 0>(
          ordered_gradients_.data(), ordered_hessians_.data(),
          col_sampler_.is_feature_used_bytree(), is_constant_hessian,
          config_->force_col_wise, config_->force_row_wise, config_->num_grad_quant_bins));
    }
  } else {
    CHECK_NOTNULL(share_state_);
    // cannot change is_hist_col_wise during training
    // NOTE: the old share_state_ is read (is_col_wise) while building the
    // replacement; the argument is evaluated before reset() destroys it.
    if (config_->use_quantized_grad) {
      share_state_.reset(dataset->GetShareStates<true, 32>(
          reinterpret_cast<score_t*>(gradient_discretizer_->ordered_int_gradients_and_hessians()), nullptr,
          col_sampler_.is_feature_used_bytree(), is_constant_hessian,
          share_state_->is_col_wise, !share_state_->is_col_wise, config_->num_grad_quant_bins));
    } else {
      share_state_.reset(dataset->GetShareStates<false, 0>(
          ordered_gradients_.data(), ordered_hessians_.data(), col_sampler_.is_feature_used_bytree(),
          is_constant_hessian, share_state_->is_col_wise,
          !share_state_->is_col_wise, config_->num_grad_quant_bins));
    }
  }
  CHECK_NOTNULL(share_state_);
}

114
115
116
// Swap in a new training dataset (e.g. after resampling). The feature layout
// must be unchanged: the number of features is CHECKed, only the row count
// may differ.
void SerialTreeLearner::ResetTrainingDataInner(const Dataset* train_data,
                                               bool is_constant_hessian,
                                               bool reset_multi_val_bin) {
  train_data_ = train_data;
  num_data_ = train_data_->num_data();
  CHECK_EQ(num_features_, train_data_->num_features());

  // initialize splits for leaf
  smaller_leaf_splits_->ResetNumData(num_data_);
  larger_leaf_splits_->ResetNumData(num_data_);

  // initialize data partition
  data_partition_->ResetNumData(num_data_);
  // Rebuild the shared histogram state only when asked; the col/row-wise
  // layout is kept (is_first_time = false).
  if (reset_multi_val_bin) {
    col_sampler_.SetTrainingData(train_data_);
    GetShareStates(train_data_, is_constant_hessian, false);
  }

  // initialize ordered gradients and hessians
  ordered_gradients_.resize(num_data_);
  ordered_hessians_.resize(num_data_);
  if (cegb_ != nullptr) {
    cegb_->Init();
  }
}
Guolin Ke's avatar
Guolin Ke committed
139

Guolin Ke's avatar
Guolin Ke committed
140
141
142
// Apply a new config. If num_leaves changed, the histogram pool size is
// recomputed (same formula as Init()) and per-leaf buffers are resized;
// otherwise only the config pointer and dependent components are refreshed.
void SerialTreeLearner::ResetConfig(const Config* config) {
  if (config_->num_leaves != config->num_leaves) {
    config_ = config;
    int max_cache_size = 0;
    // Get the max size of pool
    if (config->histogram_pool_size <= 0) {
      // Non-positive pool size means "unlimited": one cached set per leaf.
      max_cache_size = config_->num_leaves;
    } else {
      // Bytes for one full set of per-feature histograms -> number of sets
      // that fit in histogram_pool_size MB.
      size_t total_histogram_size = 0;
      for (int i = 0; i < train_data_->num_features(); ++i) {
        total_histogram_size += kHistEntrySize * train_data_->FeatureNumBin(i);
      }
      max_cache_size = static_cast<int>(config_->histogram_pool_size * 1024 * 1024 / total_histogram_size);
    }
    // at least need 2 leaves
    max_cache_size = std::max(2, max_cache_size);
    max_cache_size = std::min(max_cache_size, config_->num_leaves);
    histogram_pool_.DynamicChangeSize(train_data_,
    share_state_->num_hist_total_bin(),
    share_state_->feature_hist_offsets(),
    config_, max_cache_size, config_->num_leaves);

    // push split information for all leaves
    best_split_per_leaf_.resize(config_->num_leaves);
    data_partition_->ResetLeaves(config_->num_leaves);
  } else {
    config_ = config;
  }
  col_sampler_.SetConfig(config_);
  histogram_pool_.ResetConfig(train_data_, config_);
  // Create the CEGB component lazily if the new config enables it.
  if (CostEfficientGradientBoosting::IsEnable(config_)) {
    if (cegb_ == nullptr) {
      cegb_.reset(new CostEfficientGradientBoosting(this));
    }
    cegb_->Init();
  }
  constraints_.reset(LeafConstraintsBase::Create(config_, config_->num_leaves, train_data_->num_features()));
}

179
// Grow one tree greedily: repeatedly split the leaf with the largest gain
// until the leaf budget is exhausted or no split has positive gain.
// Returns a heap-allocated tree; the caller takes ownership.
Tree* SerialTreeLearner::Train(const score_t* gradients, const score_t *hessians, bool /*is_first_tree*/) {
  Common::FunctionTimer fun_timer("SerialTreeLearner::Train", global_timer);
  gradients_ = gradients;
  hessians_ = hessians;
  int num_threads = OMP_NUM_THREADS();
  // Shared state caches thread-count-sized buffers; warn if it changed mid-run.
  if (share_state_->num_threads != num_threads && share_state_->num_threads > 0) {
    Log::Warning(
        "Detected that num_threads changed during training (from %d to %d), "
        "it may cause unexpected errors.",
        share_state_->num_threads, num_threads);
  }
  share_state_->num_threads = num_threads;

  // Quantize gradients/hessians for this iteration before any histogram work.
  if (config_->use_quantized_grad) {
    gradient_discretizer_->DiscretizeGradients(num_data_, gradients_, hessians_);
  }

  // some initial works before training
  BeforeTrain();

  // Branch-feature tracking is only needed when interaction constraints exist.
  bool track_branch_features = !(config_->interaction_constraints_vector.empty());
  auto tree = std::unique_ptr<Tree>(new Tree(config_->num_leaves, track_branch_features, false));
  auto tree_ptr = tree.get();
  constraints_->ShareTreePointer(tree_ptr);

  // root leaf
  int left_leaf = 0;
  int cur_depth = 1;
  // only root leaf can be splitted on first time
  int right_leaf = -1;

  // User-forced splits are applied first and consume part of the leaf budget.
  int init_splits = ForceSplits(tree_ptr, &left_leaf, &right_leaf, &cur_depth);

  for (int split = init_splits; split < config_->num_leaves - 1; ++split) {
    // some initial works before finding best split
    if (BeforeFindBestSplit(tree_ptr, left_leaf, right_leaf)) {
      // find best threshold for every feature
      FindBestSplits(tree_ptr);
    }
    // Get a leaf with max split gain
    int best_leaf = static_cast<int>(ArrayArgs<SplitInfo>::ArgMax(best_split_per_leaf_));
    // Get split information for best leaf
    const SplitInfo& best_leaf_SplitInfo = best_split_per_leaf_[best_leaf];
    // cannot split, quit
    if (best_leaf_SplitInfo.gain <= 0.0) {
      Log::Warning("No further splits with positive gain, best gain: %f", best_leaf_SplitInfo.gain);
      break;
    }
    // split tree with best leaf
    Split(tree_ptr, best_leaf, &left_leaf, &right_leaf);
    cur_depth = std::max(cur_depth, tree->leaf_depth(left_leaf));
  }

  // Recompute leaf outputs from the original (non-quantized) gradients when
  // requested — presumably to undo quantization error; see RenewIntGradTreeOutput.
  if (config_->use_quantized_grad && config_->quant_train_renew_leaf) {
    gradient_discretizer_->RenewIntGradTreeOutput(tree.get(), config_, data_partition_.get(), gradients_, hessians_,
      [this] (int leaf_index) { return GetGlobalDataCountInLeaf(leaf_index); });
  }

  Log::Debug("Trained a tree with leaves = %d and depth = %d", tree->num_leaves(), cur_depth);
  return tree.release();
}

241
// Refit the leaf outputs of an existing tree structure against new
// gradients/hessians (structure unchanged), blending old and refitted
// outputs by refit_decay_rate. Returns a new tree; caller owns it.
Tree* SerialTreeLearner::FitByExistingTree(const Tree* old_tree, const score_t* gradients, const score_t *hessians) const {
  auto tree = std::unique_ptr<Tree>(new Tree(*old_tree));
  CHECK_GE(data_partition_->num_leaves(), tree->num_leaves());
  OMP_INIT_EX();
  #pragma omp parallel for schedule(static)
  for (int i = 0; i < tree->num_leaves(); ++i) {
    OMP_LOOP_EX_BEGIN();
    // Accumulate gradient/hessian sums over the rows currently in leaf i.
    data_size_t cnt_leaf_data = 0;
    auto tmp_idx = data_partition_->GetIndexOnLeaf(i, &cnt_leaf_data);
    // Fixed: use a double literal (was 0.0f) for the double accumulator.
    double sum_grad = 0.0;
    // Seed the hessian sum with kEpsilon so the leaf-output division is safe.
    double sum_hess = kEpsilon;
    for (data_size_t j = 0; j < cnt_leaf_data; ++j) {
      auto idx = tmp_idx[j];
      sum_grad += gradients[idx];
      sum_hess += hessians[idx];
    }
    double output;
    // Fixed: logical && (was bitwise & on two bools — same result here, but
    // && is the idiomatic, short-circuiting form). Path smoothing needs a
    // parent output, hence the i > 0 guard.
    if ((config_->path_smooth > kEpsilon) && (i > 0)) {
      output = FeatureHistogram::CalculateSplittedLeafOutput<true, true, true>(
          sum_grad, sum_hess, config_->lambda_l1, config_->lambda_l2,
          config_->max_delta_step, config_->path_smooth, cnt_leaf_data, tree->leaf_parent(i));
    } else {
      output = FeatureHistogram::CalculateSplittedLeafOutput<true, true, false>(
          sum_grad, sum_hess, config_->lambda_l1, config_->lambda_l2,
          config_->max_delta_step, config_->path_smooth, cnt_leaf_data, 0);
    }
    // Blend old and refitted outputs: decay_rate * old + (1 - decay_rate) * new.
    auto old_leaf_output = tree->LeafOutput(i);
    auto new_leaf_output = output * tree->shrinkage();
    tree->SetLeafOutput(i, config_->refit_decay_rate * old_leaf_output + (1.0 - config_->refit_decay_rate) * new_leaf_output);
    OMP_LOOP_EX_END();
  }
  OMP_THROW_EX();
  return tree.release();
}

276
277
// Refit variant that first restores the data partition from per-row leaf
// predictions, then delegates to the main FitByExistingTree overload.
Tree* SerialTreeLearner::FitByExistingTree(const Tree* old_tree, const std::vector<int>& leaf_pred,
                                           const score_t* gradients, const score_t *hessians) const {
  data_partition_->ResetByLeafPred(leaf_pred, old_tree->num_leaves());
  return FitByExistingTree(old_tree, gradients, hessians);
}

Guolin Ke's avatar
Guolin Ke committed
282
// Per-iteration setup before growing a tree: reset pools, partitions,
// constraints and per-leaf split records, then initialize the root's
// gradient/hessian sums in smaller_leaf_splits_.
void SerialTreeLearner::BeforeTrain() {
  Common::FunctionTimer fun_timer("SerialTreeLearner::BeforeTrain", global_timer);
  // reset histogram pool
  histogram_pool_.ResetMap();

  // Re-draw the per-tree feature subsample and tell the dataset which
  // features are active this iteration.
  col_sampler_.ResetByTree();
  train_data_->InitTrain(col_sampler_.is_feature_used_bytree(), share_state_.get());
  // initialize data partition
  data_partition_->Init();

  constraints_->Reset();

  // reset the splits for leaves
  for (int i = 0; i < config_->num_leaves; ++i) {
    best_split_per_leaf_[i].Reset();
  }

  // Sumup for root
  if (data_partition_->leaf_count(0) == num_data_) {
    // use all data
    if (!config_->use_quantized_grad) {
      smaller_leaf_splits_->Init(gradients_, hessians_);
    } else {
      // Quantized path: sums come from the discretized buffer plus the
      // scale factors needed to recover real-valued totals.
      smaller_leaf_splits_->Init(
        gradient_discretizer_->discretized_gradients_and_hessians(),
        gradient_discretizer_->grad_scale(),
        gradient_discretizer_->hess_scale());
    }
  } else {
    // use bagging, only use part of data
    if (!config_->use_quantized_grad) {
      smaller_leaf_splits_->Init(0, data_partition_.get(), gradients_, hessians_);
    } else {
      smaller_leaf_splits_->Init(
        0, data_partition_.get(),
        gradient_discretizer_->discretized_gradients_and_hessians(),
        gradient_discretizer_->grad_scale(),
        gradient_discretizer_->hess_scale());
    }
  }

  // Larger leaf starts empty (leaf index -1) until the first split.
  larger_leaf_splits_->Init();

  if (cegb_ != nullptr) {
    cegb_->BeforeTrain();
  }

  // Choose the histogram bin width for the root; skipped for the "data"
  // parallel learner, which handles this elsewhere.
  if (config_->use_quantized_grad && config_->tree_learner != std::string("data")) {
    gradient_discretizer_->SetNumBitsInHistogramBin<false>(0, -1, data_partition_->leaf_count(0), 0);
  }
}

Guolin Ke's avatar
Guolin Ke committed
334
// Decide whether split finding should run for the two freshly created leaves,
// and set up the smaller/larger/parent histogram pointers so the smaller
// child is constructed directly and the larger child can use histogram
// subtraction. Returns false (and disables both leaves' splits) when the
// depth limit or min_data_in_leaf rules out any further split.
bool SerialTreeLearner::BeforeFindBestSplit(const Tree* tree, int left_leaf, int right_leaf) {
  Common::FunctionTimer fun_timer("SerialTreeLearner::BeforeFindBestSplit", global_timer);
  // check depth of current leaf
  if (config_->max_depth > 0) {
    // only need to check left leaf, since right leaf is in same level of left leaf
    if (tree->leaf_depth(left_leaf) >= config_->max_depth) {
      // kMinScore marks the leaf as unsplittable for ArgMax in Train().
      best_split_per_leaf_[left_leaf].gain = kMinScore;
      if (right_leaf >= 0) {
        best_split_per_leaf_[right_leaf].gain = kMinScore;
      }
      return false;
    }
  }
  data_size_t num_data_in_left_child = GetGlobalDataCountInLeaf(left_leaf);
  data_size_t num_data_in_right_child = GetGlobalDataCountInLeaf(right_leaf);
  // no enough data to continue
  // (a leaf needs at least 2 * min_data_in_leaf rows to produce two valid children)
  if (num_data_in_right_child < static_cast<data_size_t>(config_->min_data_in_leaf * 2)
      && num_data_in_left_child < static_cast<data_size_t>(config_->min_data_in_leaf * 2)) {
    best_split_per_leaf_[left_leaf].gain = kMinScore;
    if (right_leaf >= 0) {
      best_split_per_leaf_[right_leaf].gain = kMinScore;
    }
    return false;
  }
  parent_leaf_histogram_array_ = nullptr;
  // only have root
  if (right_leaf < 0) {
    histogram_pool_.Get(left_leaf, &smaller_leaf_histogram_array_);
    larger_leaf_histogram_array_ = nullptr;
  } else if (num_data_in_left_child < num_data_in_right_child) {
    // put parent(left) leaf's histograms into larger leaf's histograms
    // Get() returning true means the parent's cached histograms survived in
    // the pool, enabling subtraction for the larger child.
    if (histogram_pool_.Get(left_leaf, &larger_leaf_histogram_array_)) { parent_leaf_histogram_array_ = larger_leaf_histogram_array_; }
    histogram_pool_.Move(left_leaf, right_leaf);
    histogram_pool_.Get(left_leaf, &smaller_leaf_histogram_array_);
  } else {
    // put parent(left) leaf's histograms to larger leaf's histograms
    if (histogram_pool_.Get(left_leaf, &larger_leaf_histogram_array_)) { parent_leaf_histogram_array_ = larger_leaf_histogram_array_; }
    histogram_pool_.Get(right_leaf, &smaller_leaf_histogram_array_);
  }
  return true;
}

376
// Convenience overload: find best splits with no forced-feature set.
void SerialTreeLearner::FindBestSplits(const Tree* tree) {
  FindBestSplits(tree, nullptr);
}

void SerialTreeLearner::FindBestSplits(const Tree* tree, const std::set<int>* force_features) {
Guolin Ke's avatar
Guolin Ke committed
381
  std::vector<int8_t> is_feature_used(num_features_, 0);
382
  #pragma omp parallel for schedule(static, 256) if (num_features_ >= 512)
Guolin Ke's avatar
Guolin Ke committed
383
  for (int feature_index = 0; feature_index < num_features_; ++feature_index) {
384
    if (!col_sampler_.is_feature_used_bytree()[feature_index] && (force_features == nullptr || force_features->find(feature_index) == force_features->end())) continue;
Guolin Ke's avatar
Guolin Ke committed
385
386
387
388
389
390
391
392
    if (parent_leaf_histogram_array_ != nullptr
        && !parent_leaf_histogram_array_[feature_index].is_splittable()) {
      smaller_leaf_histogram_array_[feature_index].set_is_splittable(false);
      continue;
    }
    is_feature_used[feature_index] = 1;
  }
  bool use_subtract = parent_leaf_histogram_array_ != nullptr;
393

Guolin Ke's avatar
Guolin Ke committed
394
  ConstructHistograms(is_feature_used, use_subtract);
395
  FindBestSplitsFromHistograms(is_feature_used, use_subtract, tree);
Guolin Ke's avatar
Guolin Ke committed
396
397
}

398
399
400
401
// Construct the gradient/hessian histograms for the smaller leaf, and for the
// larger leaf too when subtraction from the parent is not possible.
// The quantized path dispatches on the per-leaf histogram bit width
// (16-bit vs 32-bit accumulators).
void SerialTreeLearner::ConstructHistograms(
    const std::vector<int8_t>& is_feature_used, bool use_subtract) {
  Common::FunctionTimer fun_timer("SerialTreeLearner::ConstructHistograms",
                                  global_timer);
  // construct smaller leaf
  if (config_->use_quantized_grad) {
    const uint8_t smaller_leaf_num_bits = gradient_discretizer_->GetHistBitsInLeaf<false>(smaller_leaf_splits_->leaf_index());
    // kHistOffset backs the pointer up so the dataset writes at the expected base.
    hist_t* ptr_smaller_leaf_hist_data =
        smaller_leaf_num_bits <= 16 ?
        reinterpret_cast<hist_t*>(smaller_leaf_histogram_array_[0].RawDataInt16() - kHistOffset) :
        reinterpret_cast<hist_t*>(smaller_leaf_histogram_array_[0].RawDataInt32() - kHistOffset);
    // Shared argument pack for both bit-width instantiations below.
    #define SMALLER_LEAF_ARGS \
      is_feature_used, smaller_leaf_splits_->data_indices(), \
      smaller_leaf_splits_->num_data_in_leaf(), \
      reinterpret_cast<const score_t*>(gradient_discretizer_->discretized_gradients_and_hessians()), \
      nullptr, \
      reinterpret_cast<score_t*>(gradient_discretizer_->ordered_int_gradients_and_hessians()), \
      nullptr, \
      share_state_.get(), \
      reinterpret_cast<hist_t*>(ptr_smaller_leaf_hist_data)
    if (smaller_leaf_num_bits <= 16) {
      train_data_->ConstructHistograms<true, 16>(SMALLER_LEAF_ARGS);
    } else {
      train_data_->ConstructHistograms<true, 32>(SMALLER_LEAF_ARGS);
    }
    #undef SMALLER_LEAF_ARGS
    // Larger leaf must be constructed directly when subtraction is unavailable.
    if (larger_leaf_histogram_array_ && !use_subtract) {
      const uint8_t larger_leaf_num_bits = gradient_discretizer_->GetHistBitsInLeaf<false>(larger_leaf_splits_->leaf_index());
      hist_t* ptr_larger_leaf_hist_data =
        larger_leaf_num_bits <= 16 ?
        reinterpret_cast<hist_t*>(larger_leaf_histogram_array_[0].RawDataInt16() - kHistOffset) :
        reinterpret_cast<hist_t*>(larger_leaf_histogram_array_[0].RawDataInt32() - kHistOffset);
      #define LARGER_LEAF_ARGS \
        is_feature_used, larger_leaf_splits_->data_indices(), \
        larger_leaf_splits_->num_data_in_leaf(), \
        reinterpret_cast<const score_t*>(gradient_discretizer_->discretized_gradients_and_hessians()), \
        nullptr, \
        reinterpret_cast<score_t*>(gradient_discretizer_->ordered_int_gradients_and_hessians()), \
        nullptr, \
        share_state_.get(), \
        reinterpret_cast<hist_t*>(ptr_larger_leaf_hist_data)
      if (larger_leaf_num_bits <= 16) {
        train_data_->ConstructHistograms<true, 16>(LARGER_LEAF_ARGS);
      } else {
        train_data_->ConstructHistograms<true, 32>(LARGER_LEAF_ARGS);
      }
      #undef LARGER_LEAF_ARGS
    }
  } else {
    // Non-quantized path: plain floating-point histograms.
    hist_t* ptr_smaller_leaf_hist_data =
        smaller_leaf_histogram_array_[0].RawData() - kHistOffset;
    train_data_->ConstructHistograms<false, 0>(
        is_feature_used, smaller_leaf_splits_->data_indices(),
        smaller_leaf_splits_->num_data_in_leaf(), gradients_, hessians_,
        ordered_gradients_.data(), ordered_hessians_.data(), share_state_.get(),
        ptr_smaller_leaf_hist_data);
    if (larger_leaf_histogram_array_ != nullptr && !use_subtract) {
      // construct larger leaf
      hist_t* ptr_larger_leaf_hist_data =
          larger_leaf_histogram_array_[0].RawData() - kHistOffset;
      train_data_->ConstructHistograms<false, 0>(
          is_feature_used, larger_leaf_splits_->data_indices(),
          larger_leaf_splits_->num_data_in_leaf(), gradients_, hessians_,
          ordered_gradients_.data(), ordered_hessians_.data(), share_state_.get(),
          ptr_larger_leaf_hist_data);
    }
  }
}

Guolin Ke's avatar
Guolin Ke committed
467
void SerialTreeLearner::FindBestSplitsFromHistograms(
468
    const std::vector<int8_t>& is_feature_used, bool use_subtract, const Tree* tree) {
Guolin Ke's avatar
Guolin Ke committed
469
470
  Common::FunctionTimer fun_timer(
      "SerialTreeLearner::FindBestSplitsFromHistograms", global_timer);
471
472
  std::vector<SplitInfo> smaller_best(share_state_->num_threads);
  std::vector<SplitInfo> larger_best(share_state_->num_threads);
473
474
  std::vector<int8_t> smaller_node_used_features = col_sampler_.GetByNode(tree, smaller_leaf_splits_->leaf_index());
  std::vector<int8_t> larger_node_used_features;
475
476
477
478
479
  double smaller_leaf_parent_output = GetParentOutput(tree, smaller_leaf_splits_.get());
  double larger_leaf_parent_output = 0;
  if (larger_leaf_splits_ != nullptr && larger_leaf_splits_->leaf_index() >= 0) {
    larger_leaf_parent_output = GetParentOutput(tree, larger_leaf_splits_.get());
  }
480
481
482
  if (larger_leaf_splits_->leaf_index() >= 0) {
    larger_node_used_features = col_sampler_.GetByNode(tree, larger_leaf_splits_->leaf_index());
  }
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502

  if (use_subtract && config_->use_quantized_grad) {
    const int parent_index = std::min(smaller_leaf_splits_->leaf_index(), larger_leaf_splits_->leaf_index());
    const uint8_t parent_hist_bits = gradient_discretizer_->GetHistBitsInNode<false>(parent_index);
    const uint8_t larger_hist_bits = gradient_discretizer_->GetHistBitsInLeaf<false>(larger_leaf_splits_->leaf_index());
    if (parent_hist_bits > 16 && larger_hist_bits <= 16) {
      OMP_INIT_EX();
      #pragma omp parallel for schedule(static) num_threads(share_state_->num_threads)
      for (int feature_index = 0; feature_index < num_features_; ++feature_index) {
        OMP_LOOP_EX_BEGIN();
        if (!is_feature_used[feature_index]) {
          continue;
        }
        larger_leaf_histogram_array_[feature_index].CopyToBuffer(gradient_discretizer_->GetChangeHistBitsBuffer(feature_index));
        OMP_LOOP_EX_END();
      }
      OMP_THROW_EX();
    }
  }

503
  OMP_INIT_EX();
Guolin Ke's avatar
Guolin Ke committed
504
// find splits
505
#pragma omp parallel for schedule(static) num_threads(share_state_->num_threads)
Guolin Ke's avatar
Guolin Ke committed
506
  for (int feature_index = 0; feature_index < num_features_; ++feature_index) {
507
    OMP_LOOP_EX_BEGIN();
Guolin Ke's avatar
Guolin Ke committed
508
509
510
    if (!is_feature_used[feature_index]) {
      continue;
    }
Guolin Ke's avatar
Guolin Ke committed
511
    const int tid = omp_get_thread_num();
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
    if (config_->use_quantized_grad) {
      const uint8_t hist_bits_bin = gradient_discretizer_->GetHistBitsInLeaf<false>(smaller_leaf_splits_->leaf_index());
      const int64_t int_sum_gradient_and_hessian = smaller_leaf_splits_->int_sum_gradients_and_hessians();
      if (hist_bits_bin <= 16) {
        train_data_->FixHistogramInt<int32_t, int32_t, 16, 16>(
            feature_index, int_sum_gradient_and_hessian,
            reinterpret_cast<hist_t*>(smaller_leaf_histogram_array_[feature_index].RawDataInt16()));
      } else {
        train_data_->FixHistogramInt<int64_t, int64_t, 32, 32>(
            feature_index, int_sum_gradient_and_hessian,
            reinterpret_cast<hist_t*>(smaller_leaf_histogram_array_[feature_index].RawDataInt32()));
      }
    } else {
      train_data_->FixHistogram(
          feature_index, smaller_leaf_splits_->sum_gradients(),
          smaller_leaf_splits_->sum_hessians(),
          smaller_leaf_histogram_array_[feature_index].RawData());
    }
530
    int real_fidx = train_data_->RealFeatureIndex(feature_index);
531
532
533
534
535

    ComputeBestSplitForFeature(smaller_leaf_histogram_array_, feature_index,
                               real_fidx,
                               smaller_node_used_features[feature_index],
                               smaller_leaf_splits_->num_data_in_leaf(),
536
537
                               smaller_leaf_splits_.get(), &smaller_best[tid],
                               smaller_leaf_parent_output);
538

Guolin Ke's avatar
Guolin Ke committed
539
    // only has root leaf
Guolin Ke's avatar
Guolin Ke committed
540
541
542
543
    if (larger_leaf_splits_ == nullptr ||
        larger_leaf_splits_->leaf_index() < 0) {
      continue;
    }
Guolin Ke's avatar
Guolin Ke committed
544

Guolin Ke's avatar
Guolin Ke committed
545
    if (use_subtract) {
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
      if (config_->use_quantized_grad) {
        const int parent_index = std::min(smaller_leaf_splits_->leaf_index(), larger_leaf_splits_->leaf_index());
        const uint8_t parent_hist_bits = gradient_discretizer_->GetHistBitsInNode<false>(parent_index);
        const uint8_t smaller_hist_bits = gradient_discretizer_->GetHistBitsInLeaf<false>(smaller_leaf_splits_->leaf_index());
        const uint8_t larger_hist_bits = gradient_discretizer_->GetHistBitsInLeaf<false>(larger_leaf_splits_->leaf_index());
        if (parent_hist_bits <= 16) {
          CHECK_LE(smaller_hist_bits, 16);
          CHECK_LE(larger_hist_bits, 16);
          larger_leaf_histogram_array_[feature_index].Subtract<true, int32_t, int32_t, int32_t, 16, 16, 16>(
              smaller_leaf_histogram_array_[feature_index]);
        } else if (larger_hist_bits <= 16) {
          CHECK_LE(smaller_hist_bits, 16);
          larger_leaf_histogram_array_[feature_index].Subtract<true, int64_t, int32_t, int32_t, 32, 16, 16>(
              smaller_leaf_histogram_array_[feature_index], gradient_discretizer_->GetChangeHistBitsBuffer(feature_index));
        } else if (smaller_hist_bits <= 16) {
          larger_leaf_histogram_array_[feature_index].Subtract<true, int64_t, int32_t, int64_t, 32, 16, 32>(
              smaller_leaf_histogram_array_[feature_index]);
        } else {
          larger_leaf_histogram_array_[feature_index].Subtract<true, int64_t, int64_t, int64_t, 32, 32, 32>(
              smaller_leaf_histogram_array_[feature_index]);
        }
      } else {
        larger_leaf_histogram_array_[feature_index].Subtract<false>(
            smaller_leaf_histogram_array_[feature_index]);
      }
571
    } else {
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
      if (config_->use_quantized_grad) {
        const int64_t int_sum_gradient_and_hessian = larger_leaf_splits_->int_sum_gradients_and_hessians();
        const uint8_t hist_bits_bin = gradient_discretizer_->GetHistBitsInLeaf<false>(larger_leaf_splits_->leaf_index());
        if (hist_bits_bin <= 16) {
          train_data_->FixHistogramInt<int32_t, int32_t, 16, 16>(
              feature_index, int_sum_gradient_and_hessian,
              reinterpret_cast<hist_t*>(larger_leaf_histogram_array_[feature_index].RawDataInt16()));
        } else {
          train_data_->FixHistogramInt<int64_t, int64_t, 32, 32>(
              feature_index, int_sum_gradient_and_hessian,
              reinterpret_cast<hist_t*>(larger_leaf_histogram_array_[feature_index].RawDataInt32()));
        }
      } else {
        train_data_->FixHistogram(
            feature_index, larger_leaf_splits_->sum_gradients(),
            larger_leaf_splits_->sum_hessians(),
            larger_leaf_histogram_array_[feature_index].RawData());
      }
590
    }
591
592
593
594
595

    ComputeBestSplitForFeature(larger_leaf_histogram_array_, feature_index,
                               real_fidx,
                               larger_node_used_features[feature_index],
                               larger_leaf_splits_->num_data_in_leaf(),
596
597
                               larger_leaf_splits_.get(), &larger_best[tid],
                               larger_leaf_parent_output);
598

599
    OMP_LOOP_EX_END();
Guolin Ke's avatar
Guolin Ke committed
600
  }
601
  OMP_THROW_EX();
Guolin Ke's avatar
Guolin Ke committed
602
  auto smaller_best_idx = ArrayArgs<SplitInfo>::ArgMax(smaller_best);
603
  int leaf = smaller_leaf_splits_->leaf_index();
Guolin Ke's avatar
Guolin Ke committed
604
605
  best_split_per_leaf_[leaf] = smaller_best[smaller_best_idx];

Guolin Ke's avatar
Guolin Ke committed
606
607
  if (larger_leaf_splits_ != nullptr &&
      larger_leaf_splits_->leaf_index() >= 0) {
608
    leaf = larger_leaf_splits_->leaf_index();
Guolin Ke's avatar
Guolin Ke committed
609
610
611
612
613
    auto larger_best_idx = ArrayArgs<SplitInfo>::ArgMax(larger_best);
    best_split_per_leaf_[leaf] = larger_best[larger_best_idx];
  }
}

/*!
 * \brief Apply the user-supplied forced splits (forced_split_json_) to the tree.
 *
 * Traverses the forced-split JSON in BFS order; for each node it gathers the
 * histogram information at the forced threshold, records the resulting
 * SplitInfo, and performs the split. Splits whose gain is negative, or whose
 * parent split was dropped, abort the forced phase and fall back to the best
 * ordinary split.
 *
 * \param tree       Tree being grown (mutated in place).
 * \param left_leaf  In/out: index of the left child of the last split (starts at root = 0).
 * \param right_leaf In/out: index of the right child of the last split.
 * \param cur_depth  In/out: updated with the depth reached by the forced splits.
 * \return Number of splits actually performed, or config_->num_leaves when the
 *         fallback finds no split with positive gain (stops further growth).
 */
int32_t SerialTreeLearner::ForceSplits(Tree* tree, int* left_leaf,
                                       int* right_leaf, int *cur_depth) {
  bool abort_last_forced_split = false;
  if (forced_split_json_ == nullptr) {
    return 0;
  }
  int32_t result_count = 0;
  // start at root leaf
  *left_leaf = 0;
  std::queue<std::pair<Json, int>> q;
  Json left = *forced_split_json_;
  Json right;
  bool left_smaller = true;
  std::unordered_map<int, SplitInfo> forceSplitMap;
  q.push(std::make_pair(left, *left_leaf));

  // Histogram construction require parent features.
  std::set<int> force_split_features = FindAllForceFeatures(*forced_split_json_);
  while (!q.empty()) {
    if (BeforeFindBestSplit(tree, *left_leaf, *right_leaf)) {
      FindBestSplits(tree, &force_split_features);
    }

    // then, compute own splits
    SplitInfo left_split;
    SplitInfo right_split;

    if (!left.is_null()) {
      const int left_feature = left["feature"].int_value();
      const double left_threshold_double = left["threshold"].number_value();
      const int left_inner_feature_index = train_data_->InnerFeatureIndex(left_feature);
      const uint32_t left_threshold = train_data_->BinThreshold(
              left_inner_feature_index, left_threshold_double);
      // The smaller leaf's histogram is the freshly-constructed one; pick the
      // array/splits pair matching which side of the last split was smaller.
      auto leaf_histogram_array = (left_smaller) ? smaller_leaf_histogram_array_ : larger_leaf_histogram_array_;
      auto left_leaf_splits = (left_smaller) ? smaller_leaf_splits_.get() : larger_leaf_splits_.get();
      leaf_histogram_array[left_inner_feature_index].GatherInfoForThreshold(
              left_leaf_splits->sum_gradients(),
              left_leaf_splits->sum_hessians(),
              left_threshold,
              left_leaf_splits->num_data_in_leaf(),
              left_leaf_splits->weight(),
              &left_split);
      left_split.feature = left_feature;
      forceSplitMap[*left_leaf] = left_split;
      // Negative gain: drop this forced split so its subtree is aborted below.
      if (left_split.gain < 0) {
        forceSplitMap.erase(*left_leaf);
      }
    }

    if (!right.is_null()) {
      const int right_feature = right["feature"].int_value();
      const double right_threshold_double = right["threshold"].number_value();
      const int right_inner_feature_index = train_data_->InnerFeatureIndex(right_feature);
      const uint32_t right_threshold = train_data_->BinThreshold(
              right_inner_feature_index, right_threshold_double);
      auto leaf_histogram_array = (left_smaller) ? larger_leaf_histogram_array_ : smaller_leaf_histogram_array_;
      auto right_leaf_splits = (left_smaller) ? larger_leaf_splits_.get() : smaller_leaf_splits_.get();
      leaf_histogram_array[right_inner_feature_index].GatherInfoForThreshold(
        right_leaf_splits->sum_gradients(),
        right_leaf_splits->sum_hessians(),
        right_threshold,
        right_leaf_splits->num_data_in_leaf(),
        right_leaf_splits->weight(),
        &right_split);
      right_split.feature = right_feature;
      forceSplitMap[*right_leaf] = right_split;
      if (right_split.gain < 0) {
        forceSplitMap.erase(*right_leaf);
      }
    }

    std::pair<Json, int> pair = q.front();
    q.pop();
    int current_leaf = pair.second;
    // split info should exist because searching in bfs fashion - should have added from parent
    if (forceSplitMap.find(current_leaf) == forceSplitMap.end()) {
        abort_last_forced_split = true;
        break;
    }
    best_split_per_leaf_[current_leaf] = forceSplitMap[current_leaf];
    Split(tree, current_leaf, left_leaf, right_leaf);
    left_smaller = best_split_per_leaf_[current_leaf].left_count <
                   best_split_per_leaf_[current_leaf].right_count;
    // Enqueue children that themselves carry a forced split.
    left = Json();
    right = Json();
    if ((pair.first).object_items().count("left") > 0) {
      left = (pair.first)["left"];
      if (left.object_items().count("feature") > 0 && left.object_items().count("threshold") > 0) {
        q.push(std::make_pair(left, *left_leaf));
      }
    }
    if ((pair.first).object_items().count("right") > 0) {
      right = (pair.first)["right"];
      if (right.object_items().count("feature") > 0 && right.object_items().count("threshold") > 0) {
        q.push(std::make_pair(right, *right_leaf));
      }
    }
    result_count++;
    *(cur_depth) = std::max(*(cur_depth), tree->leaf_depth(*left_leaf));
  }
  // A forced split was dropped: perform the globally best ordinary split instead.
  if (abort_last_forced_split) {
    int best_leaf =
        static_cast<int>(ArrayArgs<SplitInfo>::ArgMax(best_split_per_leaf_));
    const SplitInfo& best_leaf_SplitInfo = best_split_per_leaf_[best_leaf];
    if (best_leaf_SplitInfo.gain <= 0.0) {
      Log::Warning("No further splits with positive gain, best gain: %f",
                   best_leaf_SplitInfo.gain);
      return config_->num_leaves;
    }
    Split(tree, best_leaf, left_leaf, right_leaf);
    *(cur_depth) = std::max(*(cur_depth), tree->leaf_depth(*left_leaf));
    ++result_count;
  }
  return result_count;
}

730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
/*!
 * \brief Collect the inner feature indices used anywhere in a forced-split
 *        specification, traversed breadth-first.
 * \param force_split_leaf_setting Root JSON node of the forced-split tree.
 * \return Set of inner (dataset) feature indices referenced by the spec.
 */
std::set<int> SerialTreeLearner::FindAllForceFeatures(Json force_split_leaf_setting) {
  std::set<int> force_features;
  std::queue<Json> pending;
  pending.push(force_split_leaf_setting);

  while (!pending.empty()) {
    const Json node = pending.front();
    pending.pop();

    // Translate the user-facing feature index into the dataset's inner index.
    const int outer_index = node["feature"].int_value();
    force_features.insert(train_data_->InnerFeatureIndex(outer_index));

    // Descend into whichever children the spec provides.
    const auto& items = node.object_items();
    if (items.count("left") > 0) {
      pending.push(node["left"]);
    }
    if (items.count("right") > 0) {
      pending.push(node["right"]);
    }
  }

  return force_features;
}

/*!
 * \brief Perform the recorded best split on `best_leaf` and prepare state for
 *        the next iteration.
 *
 * Handles both numerical and categorical splits: partitions the data, updates
 * the tree structure, re-initializes the smaller/larger leaf-splits objects
 * (the smaller child's histogram is built directly, the larger one obtained by
 * subtraction), and refreshes monotone constraints.
 *
 * \param tree       Tree being grown (mutated in place).
 * \param best_leaf  Leaf to split; its SplitInfo is read from best_split_per_leaf_.
 * \param left_leaf  Out: index of the left child (same as best_leaf).
 * \param right_leaf Out: index of the newly created right child.
 * \param update_cnt Whether to refresh left/right data counts from the local
 *                   partition (skipped in data-parallel mode).
 */
void SerialTreeLearner::SplitInner(Tree* tree, int best_leaf, int* left_leaf,
                                   int* right_leaf, bool update_cnt) {
  Common::FunctionTimer fun_timer("SerialTreeLearner::SplitInner", global_timer);
  SplitInfo& best_split_info = best_split_per_leaf_[best_leaf];
  const int inner_feature_index =
      train_data_->InnerFeatureIndex(best_split_info.feature);
  if (cegb_ != nullptr) {
    // Cost-effective gradient boosting may adjust the chosen split.
    cegb_->UpdateLeafBestSplits(tree, best_leaf, &best_split_info,
                                &best_split_per_leaf_);
  }
  *left_leaf = best_leaf;
  auto next_leaf_id = tree->NextLeafId();

  // update before tree split
  constraints_->BeforeSplit(best_leaf, next_leaf_id,
                            best_split_info.monotone_type);

  bool is_numerical_split =
      train_data_->FeatureBinMapper(inner_feature_index)->bin_type() ==
      BinType::NumericalBin;
  if (is_numerical_split) {
    auto threshold_double = train_data_->RealThreshold(
        inner_feature_index, best_split_info.threshold);
    data_partition_->Split(best_leaf, train_data_, inner_feature_index,
                           &best_split_info.threshold, 1,
                           best_split_info.default_left, next_leaf_id);
    if (update_cnt) {
      // don't need to update this in data-based parallel model
      best_split_info.left_count = data_partition_->leaf_count(*left_leaf);
      best_split_info.right_count = data_partition_->leaf_count(next_leaf_id);
    }
    // split tree, will return right leaf
    *right_leaf = tree->Split(
        best_leaf, inner_feature_index, best_split_info.feature,
        best_split_info.threshold, threshold_double,
        static_cast<double>(best_split_info.left_output),
        static_cast<double>(best_split_info.right_output),
        static_cast<data_size_t>(best_split_info.left_count),
        static_cast<data_size_t>(best_split_info.right_count),
        static_cast<double>(best_split_info.left_sum_hessian),
        static_cast<double>(best_split_info.right_sum_hessian),
        // store the true split gain in tree model
        static_cast<float>(best_split_info.gain + config_->min_gain_to_split),
        train_data_->FeatureBinMapper(inner_feature_index)->missing_type(),
        best_split_info.default_left);
  } else {
    // Categorical split: thresholds are bitsets over category bins (inner)
    // and over real category values (for the stored model).
    std::vector<uint32_t> cat_bitset_inner =
        Common::ConstructBitset(best_split_info.cat_threshold.data(),
                                best_split_info.num_cat_threshold);
    std::vector<int> threshold_int(best_split_info.num_cat_threshold);
    for (int i = 0; i < best_split_info.num_cat_threshold; ++i) {
      threshold_int[i] = static_cast<int>(train_data_->RealThreshold(
          inner_feature_index, best_split_info.cat_threshold[i]));
    }
    std::vector<uint32_t> cat_bitset = Common::ConstructBitset(
        threshold_int.data(), best_split_info.num_cat_threshold);

    data_partition_->Split(best_leaf, train_data_, inner_feature_index,
                           cat_bitset_inner.data(),
                           static_cast<int>(cat_bitset_inner.size()),
                           best_split_info.default_left, next_leaf_id);

    if (update_cnt) {
      // don't need to update this in data-based parallel model
      best_split_info.left_count = data_partition_->leaf_count(*left_leaf);
      best_split_info.right_count = data_partition_->leaf_count(next_leaf_id);
    }

    *right_leaf = tree->SplitCategorical(
        best_leaf, inner_feature_index, best_split_info.feature,
        cat_bitset_inner.data(), static_cast<int>(cat_bitset_inner.size()),
        cat_bitset.data(), static_cast<int>(cat_bitset.size()),
        static_cast<double>(best_split_info.left_output),
        static_cast<double>(best_split_info.right_output),
        static_cast<data_size_t>(best_split_info.left_count),
        static_cast<data_size_t>(best_split_info.right_count),
        static_cast<double>(best_split_info.left_sum_hessian),
        static_cast<double>(best_split_info.right_sum_hessian),
        // store the true split gain in tree model
        static_cast<float>(best_split_info.gain + config_->min_gain_to_split),
        train_data_->FeatureBinMapper(inner_feature_index)->missing_type());
  }

#ifdef DEBUG
  CHECK(*right_leaf == next_leaf_id);
#endif

  // init the leaves that used on next iteration
  if (best_split_info.left_count < best_split_info.right_count) {
    CHECK_GT(best_split_info.left_count, 0);
    smaller_leaf_splits_->Init(*left_leaf, data_partition_.get(),
                               best_split_info.left_sum_gradient,
                               best_split_info.left_sum_hessian,
                               best_split_info.left_output);
    larger_leaf_splits_->Init(*right_leaf, data_partition_.get(),
                              best_split_info.right_sum_gradient,
                              best_split_info.right_sum_hessian,
                              best_split_info.right_output);
  } else {
    CHECK_GT(best_split_info.right_count, 0);
    smaller_leaf_splits_->Init(*right_leaf, data_partition_.get(),
                               best_split_info.right_sum_gradient,
                               best_split_info.right_sum_hessian,
                               best_split_info.right_output);
    larger_leaf_splits_->Init(*left_leaf, data_partition_.get(),
                              best_split_info.left_sum_gradient,
                              best_split_info.left_sum_hessian,
                              best_split_info.left_output);
  }
  // Track per-leaf histogram bit widths for quantized-gradient training
  // (not needed in the data-parallel learner).
  if (config_->use_quantized_grad && config_->tree_learner != std::string("data")) {
    gradient_discretizer_->SetNumBitsInHistogramBin<false>(*left_leaf, *right_leaf,
                                                    data_partition_->leaf_count(*left_leaf),
                                                    data_partition_->leaf_count(*right_leaf));
  }
  auto leaves_need_update = constraints_->Update(
      is_numerical_split, *left_leaf, *right_leaf,
      best_split_info.monotone_type, best_split_info.right_output,
      best_split_info.left_output, inner_feature_index, best_split_info,
      best_split_per_leaf_);
  // update leave outputs if needed
  for (auto leaf : leaves_need_update) {
    RecomputeBestSplitForLeaf(tree, leaf, &best_split_per_leaf_[leaf]);
  }
}
/*!
 * \brief Let the objective function renew (re-fit) each leaf's output value
 *        after the tree structure is fixed, e.g. for objectives that optimize
 *        a different statistic than the gradient-based leaf value.
 *
 * In distributed mode, leaves with no local data are zeroed and the renewed
 * outputs are averaged across the machines that do hold data for each leaf.
 *
 * \param tree            Tree whose leaf outputs are updated in place.
 * \param obj             Objective function; no-op unless IsRenewTreeOutput().
 * \param residual_getter Callback mapping (labels, data index) to a residual.
 * \param total_num_data  Total dataset size (differs from num_data_ when bagging).
 * \param bag_indices     Mapping from bagged row to original row, used when bagging.
 * \param bag_cnt         Number of bagged rows; must equal num_data_ when bagging.
 */
void SerialTreeLearner::RenewTreeOutput(Tree* tree, const ObjectiveFunction* obj, std::function<double(const label_t*, int)> residual_getter,
                                        data_size_t total_num_data, const data_size_t* bag_indices, data_size_t bag_cnt, const double* /*train_score*/) const {
  if (obj != nullptr && obj->IsRenewTreeOutput()) {
    CHECK_LE(tree->num_leaves(), data_partition_->num_leaves());
    const data_size_t* bag_mapper = nullptr;
    if (total_num_data != num_data_) {
      // Bagging in effect: indices must be mapped back to the full dataset.
      CHECK_EQ(bag_cnt, num_data_);
      bag_mapper = bag_indices;
    }
    std::vector<int> n_nozeroworker_perleaf(tree->num_leaves(), 1);
    int num_machines = Network::num_machines();
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < tree->num_leaves(); ++i) {
      const double output = static_cast<double>(tree->LeafOutput(i));
      data_size_t cnt_leaf_data = 0;
      auto index_mapper = data_partition_->GetIndexOnLeaf(i, &cnt_leaf_data);
      if (cnt_leaf_data > 0) {
        // bag_mapper[index_mapper[i]]
        const double new_output = obj->RenewTreeOutput(output, residual_getter, index_mapper, bag_mapper, cnt_leaf_data);
        tree->SetLeafOutput(i, new_output);
      } else {
        // No local data for this leaf: only legal in distributed training.
        CHECK_GT(num_machines, 1);
        tree->SetLeafOutput(i, 0.0);
        n_nozeroworker_perleaf[i] = 0;
      }
    }
    if (num_machines > 1) {
      // Average renewed outputs over the workers that contributed data.
      std::vector<double> outputs(tree->num_leaves());
      for (int i = 0; i < tree->num_leaves(); ++i) {
        outputs[i] = static_cast<double>(tree->LeafOutput(i));
      }
      outputs = Network::GlobalSum(&outputs);
      n_nozeroworker_perleaf = Network::GlobalSum(&n_nozeroworker_perleaf);
      for (int i = 0; i < tree->num_leaves(); ++i) {
        tree->SetLeafOutput(i, outputs[i] / n_nozeroworker_perleaf[i]);
      }
    }
  }
}

void SerialTreeLearner::ComputeBestSplitForFeature(
    FeatureHistogram* histogram_array_, int feature_index, int real_fidx,
Guolin Ke's avatar
Guolin Ke committed
923
    int8_t is_feature_used, int num_data, const LeafSplits* leaf_splits,
924
    SplitInfo* best_split, double parent_output) {
925
926
927
928
929
930
931
  bool is_feature_numerical = train_data_->FeatureBinMapper(feature_index)
                                  ->bin_type() == BinType::NumericalBin;
  if (is_feature_numerical & !config_->monotone_constraints.empty()) {
    constraints_->RecomputeConstraintsIfNeeded(
        constraints_.get(), feature_index, ~(leaf_splits->leaf_index()),
        train_data_->FeatureNumBin(feature_index));
  }
932
  SplitInfo new_split;
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
  if (config_->use_quantized_grad) {
    const uint8_t hist_bits_bin = gradient_discretizer_->GetHistBitsInLeaf<false>(leaf_splits->leaf_index());
    histogram_array_[feature_index].FindBestThresholdInt(
        leaf_splits->int_sum_gradients_and_hessians(),
        gradient_discretizer_->grad_scale(),
        gradient_discretizer_->hess_scale(),
        hist_bits_bin,
        hist_bits_bin,
        num_data,
        constraints_->GetFeatureConstraint(leaf_splits->leaf_index(), feature_index), parent_output, &new_split);
  } else {
    histogram_array_[feature_index].FindBestThreshold(
        leaf_splits->sum_gradients(), leaf_splits->sum_hessians(), num_data,
        constraints_->GetFeatureConstraint(leaf_splits->leaf_index(), feature_index), parent_output, &new_split);
  }
948
949
950
  new_split.feature = real_fidx;
  if (cegb_ != nullptr) {
    new_split.gain -=
951
        cegb_->DeltaGain(feature_index, real_fidx, leaf_splits->leaf_index(),
952
953
                         num_data, new_split);
  }
954
955
956
957
958
  if (new_split.monotone_type != 0) {
    double penalty = constraints_->ComputeMonotoneSplitGainPenalty(
        leaf_splits->leaf_index(), config_->monotone_penalty);
    new_split.gain *= penalty;
  }
Guolin Ke's avatar
Guolin Ke committed
959
960
961
  // it is needed to filter the features after the above code.
  // Otherwise, the `is_splittable` in `FeatureHistogram` will be wrong, and cause some features being accidentally filtered in the later nodes.
  if (new_split > *best_split && is_feature_used) {
962
963
964
965
    *best_split = new_split;
  }
}

966
967
968
969
970
971
972
973
974
975
976
977
978
979
/*!
 * \brief Output value of a leaf's parent, used by path smoothing.
 *
 * The root has no parent, so for a single-leaf tree its own (unsmoothed)
 * output is computed directly; otherwise the leaf's stored weight is the
 * parent output.
 */
double SerialTreeLearner::GetParentOutput(const Tree* tree, const LeafSplits* leaf_splits) const {
  if (tree->num_leaves() != 1) {
    return leaf_splits->weight();
  }
  // for root leaf the "parent" output is its own output because we don't apply any smoothing to the root
  return FeatureHistogram::CalculateSplittedLeafOutput<true, true, true, false>(
      leaf_splits->sum_gradients(), leaf_splits->sum_hessians(), config_->lambda_l1,
      config_->lambda_l2, config_->max_delta_step, BasicConstraint(),
      config_->path_smooth, static_cast<data_size_t>(leaf_splits->num_data_in_leaf()), 0);
}

/*!
 * \brief Re-search the best split for a leaf whose constraints changed,
 *        reusing the leaf's cached histograms.
 *
 * Reconstructs the leaf's gradient/hessian sums from the stale SplitInfo,
 * then re-runs the per-feature split search in parallel and writes the new
 * best split back. Skipped (with a warning) when the histogram cache no
 * longer holds this leaf.
 *
 * \param tree  Current tree (used for node-level column sampling).
 * \param leaf  Leaf index to recompute.
 * \param split In/out: previous best split; replaced by the recomputed one.
 */
void SerialTreeLearner::RecomputeBestSplitForLeaf(Tree* tree, int leaf, SplitInfo* split) {
  FeatureHistogram* histogram_array_;
  if (!histogram_pool_.Get(leaf, &histogram_array_)) {
    Log::Warning(
        "Get historical Histogram for leaf %d failed, will skip the "
        "``RecomputeBestSplitForLeaf``",
        leaf);
    return;
  }
  // Reconstruct leaf totals from the two sides of the stale split.
  double sum_gradients = split->left_sum_gradient + split->right_sum_gradient;
  double sum_hessians = split->left_sum_hessian + split->right_sum_hessian;
  int num_data = split->left_count + split->right_count;

  std::vector<SplitInfo> bests(share_state_->num_threads);
  LeafSplits leaf_splits(num_data, config_);
  leaf_splits.Init(leaf, sum_gradients, sum_hessians);

  // can't use GetParentOutput because leaf_splits doesn't have weight property set
  double parent_output = 0;
  if (config_->path_smooth > kEpsilon) {
    parent_output = FeatureHistogram::CalculateSplittedLeafOutput<true, true, true, false>(
      sum_gradients, sum_hessians, config_->lambda_l1, config_->lambda_l2, config_->max_delta_step,
      BasicConstraint(), config_->path_smooth, static_cast<data_size_t>(num_data), 0);
  }

  OMP_INIT_EX();
// find splits
std::vector<int8_t> node_used_features = col_sampler_.GetByNode(tree, leaf);
#pragma omp parallel for schedule(static) num_threads(share_state_->num_threads)
  for (int feature_index = 0; feature_index < num_features_; ++feature_index) {
    OMP_LOOP_EX_BEGIN();
    if (!col_sampler_.is_feature_used_bytree()[feature_index] ||
        !histogram_array_[feature_index].is_splittable()) {
      continue;
    }
    const int tid = omp_get_thread_num();
    int real_fidx = train_data_->RealFeatureIndex(feature_index);
    ComputeBestSplitForFeature(histogram_array_, feature_index, real_fidx, node_used_features[feature_index],
                               num_data, &leaf_splits, &bests[tid], parent_output);

    OMP_LOOP_EX_END();
  }
  OMP_THROW_EX();
  auto best_idx = ArrayArgs<SplitInfo>::ArgMax(bests);
  *split = bests[best_idx];
}

}  // namespace LightGBM