/*!
 * Copyright (c) 2016 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for license information.
 */
#include "serial_tree_learner.h"

7
8
9
10
11
#include <LightGBM/network.h>
#include <LightGBM/objective_function.h>
#include <LightGBM/utils/array_args.h>
#include <LightGBM/utils/common.h>

12
13
#include <algorithm>
#include <queue>
14
#include <set>
15
16
17
#include <unordered_map>
#include <utility>

18
19
#include "cost_effective_gradient_boosting.hpp"

Guolin Ke's avatar
Guolin Ke committed
20
21
namespace LightGBM {

Guolin Ke's avatar
Guolin Ke committed
22
SerialTreeLearner::SerialTreeLearner(const Config* config)
    : config_(config), col_sampler_(config) {
  // The gradient discretizer is only needed for quantized-gradient training;
  // it is allocated lazily in Init() when config_->use_quantized_grad is set.
  gradient_discretizer_ = nullptr;
}

SerialTreeLearner::~SerialTreeLearner() {}

30
void SerialTreeLearner::Init(const Dataset* train_data, bool is_constant_hessian) {
Guolin Ke's avatar
Guolin Ke committed
31
32
33
  train_data_ = train_data;
  num_data_ = train_data_->num_data();
  num_features_ = train_data_->num_features();
34
35
  int max_cache_size = 0;
  // Get the max size of pool
Guolin Ke's avatar
Guolin Ke committed
36
37
  if (config_->histogram_pool_size <= 0) {
    max_cache_size = config_->num_leaves;
38
39
40
  } else {
    size_t total_histogram_size = 0;
    for (int i = 0; i < train_data_->num_features(); ++i) {
41
      total_histogram_size += kHistEntrySize * train_data_->FeatureNumBin(i);
42
    }
Guolin Ke's avatar
Guolin Ke committed
43
    max_cache_size = static_cast<int>(config_->histogram_pool_size * 1024 * 1024 / total_histogram_size);
44
45
  }
  // at least need 2 leaves
Guolin Ke's avatar
Guolin Ke committed
46
  max_cache_size = std::max(2, max_cache_size);
Guolin Ke's avatar
Guolin Ke committed
47
  max_cache_size = std::min(max_cache_size, config_->num_leaves);
Guolin Ke's avatar
Guolin Ke committed
48

Guolin Ke's avatar
Guolin Ke committed
49
  // push split information for all leaves
Guolin Ke's avatar
Guolin Ke committed
50
  best_split_per_leaf_.resize(config_->num_leaves);
51
  constraints_.reset(LeafConstraintsBase::Create(config_, config_->num_leaves, train_data_->num_features()));
Guolin Ke's avatar
Guolin Ke committed
52

wxchan's avatar
wxchan committed
53
  // initialize splits for leaf
Guolin Ke's avatar
Guolin Ke committed
54
55
  smaller_leaf_splits_.reset(new LeafSplits(train_data_->num_data(), config_));
  larger_leaf_splits_.reset(new LeafSplits(train_data_->num_data(), config_));
Guolin Ke's avatar
Guolin Ke committed
56
57

  // initialize data partition
Guolin Ke's avatar
Guolin Ke committed
58
  data_partition_.reset(new DataPartition(num_data_, config_->num_leaves));
59
  col_sampler_.SetTrainingData(train_data_);
Guolin Ke's avatar
Guolin Ke committed
60
  // initialize ordered gradients and hessians
Guolin Ke's avatar
Guolin Ke committed
61
62
  ordered_gradients_.resize(num_data_);
  ordered_hessians_.resize(num_data_);
63

64
65
66
67
68
  if (config_->use_quantized_grad) {
    gradient_discretizer_.reset(new GradientDiscretizer(config_->num_grad_quant_bins, config_->num_iterations, config_->seed, is_constant_hessian, config_->stochastic_rounding));
    gradient_discretizer_->Init(num_data_, config_->num_leaves, num_features_, train_data_);
  }

69
  GetShareStates(train_data_, is_constant_hessian, true);
70
71
72
73
  histogram_pool_.DynamicChangeSize(train_data_,
  share_state_->num_hist_total_bin(),
  share_state_->feature_hist_offsets(),
  config_, max_cache_size, config_->num_leaves);
74
  Log::Info("Number of data points in the train set: %d, number of used features: %d", num_data_, num_features_);
75
76
77
  if (CostEfficientGradientBoosting::IsEnable(config_)) {
    cegb_.reset(new CostEfficientGradientBoosting(this));
    cegb_->Init();
78
  }
Guolin Ke's avatar
Guolin Ke committed
79
80
}

81
82
83
void SerialTreeLearner::GetShareStates(const Dataset* dataset,
                                       bool is_constant_hessian,
                                       bool is_first_time) {
84
  if (is_first_time) {
85
86
87
    if (config_->use_quantized_grad) {
      share_state_.reset(dataset->GetShareStates<true, 32>(
        reinterpret_cast<score_t*>(gradient_discretizer_->ordered_int_gradients_and_hessians()), nullptr,
88
        col_sampler_.is_feature_used_bytree(), is_constant_hessian,
89
90
91
92
93
94
95
        config_->force_col_wise, config_->force_row_wise, config_->num_grad_quant_bins));
    } else {
      share_state_.reset(dataset->GetShareStates<false, 0>(
          ordered_gradients_.data(), ordered_hessians_.data(),
          col_sampler_.is_feature_used_bytree(), is_constant_hessian,
          config_->force_col_wise, config_->force_row_wise, config_->num_grad_quant_bins));
    }
96
  } else {
Nikita Titov's avatar
Nikita Titov committed
97
    CHECK_NOTNULL(share_state_);
98
    // cannot change is_hist_col_wise during training
99
100
101
102
103
104
105
106
107
108
109
    if (config_->use_quantized_grad) {
      share_state_.reset(dataset->GetShareStates<true, 32>(
          reinterpret_cast<score_t*>(gradient_discretizer_->ordered_int_gradients_and_hessians()), nullptr,
          col_sampler_.is_feature_used_bytree(), is_constant_hessian,
          share_state_->is_col_wise, !share_state_->is_col_wise, config_->num_grad_quant_bins));
    } else {
      share_state_.reset(dataset->GetShareStates<false, 0>(
          ordered_gradients_.data(), ordered_hessians_.data(), col_sampler_.is_feature_used_bytree(),
          is_constant_hessian, share_state_->is_col_wise,
          !share_state_->is_col_wise, config_->num_grad_quant_bins));
    }
110
  }
Nikita Titov's avatar
Nikita Titov committed
111
  CHECK_NOTNULL(share_state_);
112
113
}

114
115
116
void SerialTreeLearner::ResetTrainingDataInner(const Dataset* train_data,
                                               bool is_constant_hessian,
                                               bool reset_multi_val_bin) {
Guolin Ke's avatar
Guolin Ke committed
117
118
  train_data_ = train_data;
  num_data_ = train_data_->num_data();
119
  CHECK_EQ(num_features_, train_data_->num_features());
Guolin Ke's avatar
Guolin Ke committed
120
121
122
123
124
125
126

  // initialize splits for leaf
  smaller_leaf_splits_->ResetNumData(num_data_);
  larger_leaf_splits_->ResetNumData(num_data_);

  // initialize data partition
  data_partition_->ResetNumData(num_data_);
127
  if (reset_multi_val_bin) {
128
    col_sampler_.SetTrainingData(train_data_);
129
130
    GetShareStates(train_data_, is_constant_hessian, false);
  }
131

Guolin Ke's avatar
Guolin Ke committed
132
133
134
  // initialize ordered gradients and hessians
  ordered_gradients_.resize(num_data_);
  ordered_hessians_.resize(num_data_);
135
136
137
  if (cegb_ != nullptr) {
    cegb_->Init();
  }
Guolin Ke's avatar
Guolin Ke committed
138
}
Guolin Ke's avatar
Guolin Ke committed
139

Guolin Ke's avatar
Guolin Ke committed
140
141
142
void SerialTreeLearner::ResetConfig(const Config* config) {
  if (config_->num_leaves != config->num_leaves) {
    config_ = config;
Guolin Ke's avatar
Guolin Ke committed
143
144
    int max_cache_size = 0;
    // Get the max size of pool
Guolin Ke's avatar
Guolin Ke committed
145
146
    if (config->histogram_pool_size <= 0) {
      max_cache_size = config_->num_leaves;
Guolin Ke's avatar
Guolin Ke committed
147
148
149
    } else {
      size_t total_histogram_size = 0;
      for (int i = 0; i < train_data_->num_features(); ++i) {
150
        total_histogram_size += kHistEntrySize * train_data_->FeatureNumBin(i);
Guolin Ke's avatar
Guolin Ke committed
151
      }
Guolin Ke's avatar
Guolin Ke committed
152
      max_cache_size = static_cast<int>(config_->histogram_pool_size * 1024 * 1024 / total_histogram_size);
Guolin Ke's avatar
Guolin Ke committed
153
154
155
    }
    // at least need 2 leaves
    max_cache_size = std::max(2, max_cache_size);
Guolin Ke's avatar
Guolin Ke committed
156
    max_cache_size = std::min(max_cache_size, config_->num_leaves);
157
158
159
160
    histogram_pool_.DynamicChangeSize(train_data_,
    share_state_->num_hist_total_bin(),
    share_state_->feature_hist_offsets(),
    config_, max_cache_size, config_->num_leaves);
Guolin Ke's avatar
Guolin Ke committed
161
162

    // push split information for all leaves
Guolin Ke's avatar
Guolin Ke committed
163
164
    best_split_per_leaf_.resize(config_->num_leaves);
    data_partition_->ResetLeaves(config_->num_leaves);
Guolin Ke's avatar
Guolin Ke committed
165
  } else {
Guolin Ke's avatar
Guolin Ke committed
166
    config_ = config;
Guolin Ke's avatar
Guolin Ke committed
167
  }
168
  col_sampler_.SetConfig(config_);
169
  histogram_pool_.ResetConfig(train_data_, config_);
170
  if (CostEfficientGradientBoosting::IsEnable(config_)) {
171
172
173
    if (cegb_ == nullptr) {
      cegb_.reset(new CostEfficientGradientBoosting(this));
    }
174
175
    cegb_->Init();
  }
176
  constraints_.reset(LeafConstraintsBase::Create(config_, config_->num_leaves, train_data_->num_features()));
Guolin Ke's avatar
Guolin Ke committed
177
178
}

179
Tree* SerialTreeLearner::Train(const score_t* gradients, const score_t *hessians, bool /*is_first_tree*/) {
  // Grows one tree greedily, leaf-wise: at each step split the leaf whose best
  // candidate split has the highest gain, until num_leaves is reached or no
  // split has positive gain.  Ownership of the tree passes to the caller.
  Common::FunctionTimer fun_timer("SerialTreeLearner::Train", global_timer);
  gradients_ = gradients;
  hessians_ = hessians;

  // Warn if the OpenMP thread count changed between iterations; cached
  // per-thread state in share_state_ assumes a stable count.
  int num_threads = OMP_NUM_THREADS();
  if (share_state_->num_threads != num_threads && share_state_->num_threads > 0) {
    Log::Warning(
        "Detected that num_threads changed during training (from %d to %d), "
        "it may cause unexpected errors.",
        share_state_->num_threads, num_threads);
  }
  share_state_->num_threads = num_threads;

  if (config_->use_quantized_grad) {
    gradient_discretizer_->DiscretizeGradients(num_data_, gradients_, hessians_);
  }

  // some initial works before training
  BeforeTrain();

  bool track_branch_features = !(config_->interaction_constraints_vector.empty());
  auto tree = std::unique_ptr<Tree>(new Tree(config_->num_leaves, track_branch_features, false));
  auto tree_ptr = tree.get();
  constraints_->ShareTreePointer(tree_ptr);

  // set the root value by hand, as it is not handled by splits
  tree->SetLeafOutput(0, FeatureHistogram::CalculateSplittedLeafOutput<true, true, true, false>(
    smaller_leaf_splits_->sum_gradients(), smaller_leaf_splits_->sum_hessians(),
    config_->lambda_l1, config_->lambda_l2, config_->max_delta_step,
    BasicConstraint(), config_->path_smooth, static_cast<data_size_t>(num_data_), 0));

  // root leaf
  int left_leaf = 0;
  int cur_depth = 1;
  // only root leaf can be splitted on first time
  int right_leaf = -1;

  // Apply user-forced splits first; returns how many leaves they consumed.
  int init_splits = ForceSplits(tree_ptr, &left_leaf, &right_leaf, &cur_depth);

  for (int split = init_splits; split < config_->num_leaves - 1; ++split) {
    // Some preparation before searching; may prune the search entirely.
    if (BeforeFindBestSplit(tree_ptr, left_leaf, right_leaf)) {
      // Find the best threshold for every candidate feature.
      FindBestSplits(tree_ptr);
    }
    // Pick the leaf whose best split has the largest gain.
    int best_leaf = static_cast<int>(ArrayArgs<SplitInfo>::ArgMax(best_split_per_leaf_));
    const SplitInfo& best_leaf_SplitInfo = best_split_per_leaf_[best_leaf];
    if (best_leaf_SplitInfo.gain <= 0.0) {
      // No split can improve the loss; stop growing.
      Log::Warning("No further splits with positive gain, best gain: %f", best_leaf_SplitInfo.gain);
      break;
    }
    // Perform the chosen split; left_leaf/right_leaf become the new frontier.
    Split(tree_ptr, best_leaf, &left_leaf, &right_leaf);
    cur_depth = std::max(cur_depth, tree->leaf_depth(left_leaf));
  }

  if (config_->use_quantized_grad && config_->quant_train_renew_leaf) {
    // Recompute leaf outputs from the original (non-quantized) gradients.
    gradient_discretizer_->RenewIntGradTreeOutput(tree.get(), config_, data_partition_.get(), gradients_, hessians_,
      [this] (int leaf_index) { return GetGlobalDataCountInLeaf(leaf_index); });
  }

  Log::Debug("Trained a tree with leaves = %d and depth = %d", tree->num_leaves(), cur_depth);
  return tree.release();
}

247
Tree* SerialTreeLearner::FitByExistingTree(const Tree* old_tree, const score_t* gradients, const score_t *hessians) const {
  // Refits the leaf outputs of an existing tree structure against new
  // gradients/hessians without changing any split.  Each new output is blended
  // with the old one via refit_decay_rate.  Returns an owned copy; the caller
  // takes ownership.
  auto tree = std::unique_ptr<Tree>(new Tree(*old_tree));
  CHECK_GE(data_partition_->num_leaves(), tree->num_leaves());
  OMP_INIT_EX();
  #pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(static)
  for (int i = 0; i < tree->num_leaves(); ++i) {
    OMP_LOOP_EX_BEGIN();
    // Sum gradients/hessians over the data currently assigned to leaf i.
    data_size_t cnt_leaf_data = 0;
    auto tmp_idx = data_partition_->GetIndexOnLeaf(i, &cnt_leaf_data);
    double sum_grad = 0.0;  // fixed: was initialized with a float literal (0.0f)
    double sum_hess = kEpsilon;  // epsilon guards against division by zero
    for (data_size_t j = 0; j < cnt_leaf_data; ++j) {
      auto idx = tmp_idx[j];
      sum_grad += gradients[idx];
      sum_hess += hessians[idx];
    }
    double output;
    // Fixed: use logical && (was bitwise & on boolean operands) — same result
    // here, but short-circuiting && states the intent and is idiomatic.
    // Path smoothing needs the parent leaf, which the root (i == 0) lacks.
    if ((config_->path_smooth > kEpsilon) && (i > 0)) {
      output = FeatureHistogram::CalculateSplittedLeafOutput<true, true, true>(
          sum_grad, sum_hess, config_->lambda_l1, config_->lambda_l2,
          config_->max_delta_step, config_->path_smooth, cnt_leaf_data, tree->leaf_parent(i));
    } else {
      output = FeatureHistogram::CalculateSplittedLeafOutput<true, true, false>(
          sum_grad, sum_hess, config_->lambda_l1, config_->lambda_l2,
          config_->max_delta_step, config_->path_smooth, cnt_leaf_data, 0);
    }
    // Blend old and refit outputs; shrinkage applies to the refit value only.
    auto old_leaf_output = tree->LeafOutput(i);
    auto new_leaf_output = output * tree->shrinkage();
    tree->SetLeafOutput(i, config_->refit_decay_rate * old_leaf_output + (1.0 - config_->refit_decay_rate) * new_leaf_output);
    OMP_LOOP_EX_END();
  }
  OMP_THROW_EX();
  return tree.release();
}

282
283
Tree* SerialTreeLearner::FitByExistingTree(const Tree* old_tree, const std::vector<int>& leaf_pred,
                                           const score_t* gradients, const score_t *hessians) const {
  // Convenience overload: first rebuild the data partition from precomputed
  // per-row leaf predictions, then delegate to the main refit routine.
  data_partition_->ResetByLeafPred(leaf_pred, old_tree->num_leaves());
  return FitByExistingTree(old_tree, gradients, hessians);
}

Guolin Ke's avatar
Guolin Ke committed
288
void SerialTreeLearner::BeforeTrain() {
289
  Common::FunctionTimer fun_timer("SerialTreeLearner::BeforeTrain", global_timer);
290
291
  // reset histogram pool
  histogram_pool_.ResetMap();
Guolin Ke's avatar
Guolin Ke committed
292

293
294
  col_sampler_.ResetByTree();
  train_data_->InitTrain(col_sampler_.is_feature_used_bytree(), share_state_.get());
Guolin Ke's avatar
Guolin Ke committed
295
296
297
  // initialize data partition
  data_partition_->Init();

298
299
  constraints_->Reset();

Guolin Ke's avatar
Guolin Ke committed
300
  // reset the splits for leaves
Guolin Ke's avatar
Guolin Ke committed
301
  for (int i = 0; i < config_->num_leaves; ++i) {
Guolin Ke's avatar
Guolin Ke committed
302
303
304
305
306
307
    best_split_per_leaf_[i].Reset();
  }

  // Sumup for root
  if (data_partition_->leaf_count(0) == num_data_) {
    // use all data
308
309
310
311
312
313
314
315
    if (!config_->use_quantized_grad) {
      smaller_leaf_splits_->Init(gradients_, hessians_);
    } else {
      smaller_leaf_splits_->Init(
        gradient_discretizer_->discretized_gradients_and_hessians(),
        gradient_discretizer_->grad_scale(),
        gradient_discretizer_->hess_scale());
    }
Guolin Ke's avatar
Guolin Ke committed
316
317
  } else {
    // use bagging, only use part of data
318
319
320
321
322
323
    if (!config_->use_quantized_grad) {
      smaller_leaf_splits_->Init(0, data_partition_.get(), gradients_, hessians_);
    } else {
      smaller_leaf_splits_->Init(
        0, data_partition_.get(),
        gradient_discretizer_->discretized_gradients_and_hessians(),
324
325
        static_cast<score_t>(gradient_discretizer_->grad_scale()),
        static_cast<score_t>(gradient_discretizer_->hess_scale()));
326
    }
Guolin Ke's avatar
Guolin Ke committed
327
328
329
  }

  larger_leaf_splits_->Init();
330
331
332
333

  if (cegb_ != nullptr) {
    cegb_->BeforeTrain();
  }
334
335
336
337

  if (config_->use_quantized_grad && config_->tree_learner != std::string("data")) {
    gradient_discretizer_->SetNumBitsInHistogramBin<false>(0, -1, data_partition_->leaf_count(0), 0);
  }
Guolin Ke's avatar
Guolin Ke committed
338
339
}

Guolin Ke's avatar
Guolin Ke committed
340
bool SerialTreeLearner::BeforeFindBestSplit(const Tree* tree, int left_leaf, int right_leaf) {
341
  Common::FunctionTimer fun_timer("SerialTreeLearner::BeforeFindBestSplit", global_timer);
Guolin Ke's avatar
Guolin Ke committed
342
  // check depth of current leaf
Guolin Ke's avatar
Guolin Ke committed
343
  if (config_->max_depth > 0) {
Guolin Ke's avatar
Guolin Ke committed
344
    // only need to check left leaf, since right leaf is in same level of left leaf
Guolin Ke's avatar
Guolin Ke committed
345
    if (tree->leaf_depth(left_leaf) >= config_->max_depth) {
Guolin Ke's avatar
Guolin Ke committed
346
347
348
349
350
351
352
      best_split_per_leaf_[left_leaf].gain = kMinScore;
      if (right_leaf >= 0) {
        best_split_per_leaf_[right_leaf].gain = kMinScore;
      }
      return false;
    }
  }
Guolin Ke's avatar
Guolin Ke committed
353
354
355
  data_size_t num_data_in_left_child = GetGlobalDataCountInLeaf(left_leaf);
  data_size_t num_data_in_right_child = GetGlobalDataCountInLeaf(right_leaf);
  // no enough data to continue
Guolin Ke's avatar
Guolin Ke committed
356
357
  if (num_data_in_right_child < static_cast<data_size_t>(config_->min_data_in_leaf * 2)
      && num_data_in_left_child < static_cast<data_size_t>(config_->min_data_in_leaf * 2)) {
Guolin Ke's avatar
Guolin Ke committed
358
359
360
361
362
363
    best_split_per_leaf_[left_leaf].gain = kMinScore;
    if (right_leaf >= 0) {
      best_split_per_leaf_[right_leaf].gain = kMinScore;
    }
    return false;
  }
364
  parent_leaf_histogram_array_ = nullptr;
Guolin Ke's avatar
Guolin Ke committed
365
366
  // only have root
  if (right_leaf < 0) {
367
    histogram_pool_.Get(left_leaf, &smaller_leaf_histogram_array_);
Guolin Ke's avatar
Guolin Ke committed
368
369
    larger_leaf_histogram_array_ = nullptr;
  } else if (num_data_in_left_child < num_data_in_right_child) {
Hui Xue's avatar
Hui Xue committed
370
    // put parent(left) leaf's histograms into larger leaf's histograms
371
372
373
    if (histogram_pool_.Get(left_leaf, &larger_leaf_histogram_array_)) { parent_leaf_histogram_array_ = larger_leaf_histogram_array_; }
    histogram_pool_.Move(left_leaf, right_leaf);
    histogram_pool_.Get(left_leaf, &smaller_leaf_histogram_array_);
Guolin Ke's avatar
Guolin Ke committed
374
  } else {
Hui Xue's avatar
Hui Xue committed
375
    // put parent(left) leaf's histograms to larger leaf's histograms
376
377
    if (histogram_pool_.Get(left_leaf, &larger_leaf_histogram_array_)) { parent_leaf_histogram_array_ = larger_leaf_histogram_array_; }
    histogram_pool_.Get(right_leaf, &smaller_leaf_histogram_array_);
Guolin Ke's avatar
Guolin Ke committed
378
379
380
381
  }
  return true;
}

382
void SerialTreeLearner::FindBestSplits(const Tree* tree) {
383
384
385
386
  FindBestSplits(tree, nullptr);
}

void SerialTreeLearner::FindBestSplits(const Tree* tree, const std::set<int>* force_features) {
Guolin Ke's avatar
Guolin Ke committed
387
  std::vector<int8_t> is_feature_used(num_features_, 0);
388
  #pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(static, 256) if (num_features_ >= 512)
Guolin Ke's avatar
Guolin Ke committed
389
  for (int feature_index = 0; feature_index < num_features_; ++feature_index) {
390
    if (!col_sampler_.is_feature_used_bytree()[feature_index] && (force_features == nullptr || force_features->find(feature_index) == force_features->end())) continue;
Guolin Ke's avatar
Guolin Ke committed
391
392
393
394
395
396
397
398
    if (parent_leaf_histogram_array_ != nullptr
        && !parent_leaf_histogram_array_[feature_index].is_splittable()) {
      smaller_leaf_histogram_array_[feature_index].set_is_splittable(false);
      continue;
    }
    is_feature_used[feature_index] = 1;
  }
  bool use_subtract = parent_leaf_histogram_array_ != nullptr;
399

Guolin Ke's avatar
Guolin Ke committed
400
  ConstructHistograms(is_feature_used, use_subtract);
401
  FindBestSplitsFromHistograms(is_feature_used, use_subtract, tree);
Guolin Ke's avatar
Guolin Ke committed
402
403
}

404
405
406
407
void SerialTreeLearner::ConstructHistograms(
    const std::vector<int8_t>& is_feature_used, bool use_subtract) {
  Common::FunctionTimer fun_timer("SerialTreeLearner::ConstructHistograms",
                                  global_timer);
Guolin Ke's avatar
Guolin Ke committed
408
  // construct smaller leaf
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
  if (config_->use_quantized_grad) {
    const uint8_t smaller_leaf_num_bits = gradient_discretizer_->GetHistBitsInLeaf<false>(smaller_leaf_splits_->leaf_index());
    hist_t* ptr_smaller_leaf_hist_data =
        smaller_leaf_num_bits <= 16 ?
        reinterpret_cast<hist_t*>(smaller_leaf_histogram_array_[0].RawDataInt16() - kHistOffset) :
        reinterpret_cast<hist_t*>(smaller_leaf_histogram_array_[0].RawDataInt32() - kHistOffset);
    #define SMALLER_LEAF_ARGS \
      is_feature_used, smaller_leaf_splits_->data_indices(), \
      smaller_leaf_splits_->num_data_in_leaf(), \
      reinterpret_cast<const score_t*>(gradient_discretizer_->discretized_gradients_and_hessians()), \
      nullptr, \
      reinterpret_cast<score_t*>(gradient_discretizer_->ordered_int_gradients_and_hessians()), \
      nullptr, \
      share_state_.get(), \
      reinterpret_cast<hist_t*>(ptr_smaller_leaf_hist_data)
    if (smaller_leaf_num_bits <= 16) {
      train_data_->ConstructHistograms<true, 16>(SMALLER_LEAF_ARGS);
    } else {
      train_data_->ConstructHistograms<true, 32>(SMALLER_LEAF_ARGS);
    }
    #undef SMALLER_LEAF_ARGS
    if (larger_leaf_histogram_array_ && !use_subtract) {
      const uint8_t larger_leaf_num_bits = gradient_discretizer_->GetHistBitsInLeaf<false>(larger_leaf_splits_->leaf_index());
      hist_t* ptr_larger_leaf_hist_data =
        larger_leaf_num_bits <= 16 ?
        reinterpret_cast<hist_t*>(larger_leaf_histogram_array_[0].RawDataInt16() - kHistOffset) :
        reinterpret_cast<hist_t*>(larger_leaf_histogram_array_[0].RawDataInt32() - kHistOffset);
      #define LARGER_LEAF_ARGS \
        is_feature_used, larger_leaf_splits_->data_indices(), \
        larger_leaf_splits_->num_data_in_leaf(), \
        reinterpret_cast<const score_t*>(gradient_discretizer_->discretized_gradients_and_hessians()), \
        nullptr, \
        reinterpret_cast<score_t*>(gradient_discretizer_->ordered_int_gradients_and_hessians()), \
        nullptr, \
        share_state_.get(), \
        reinterpret_cast<hist_t*>(ptr_larger_leaf_hist_data)
      if (larger_leaf_num_bits <= 16) {
        train_data_->ConstructHistograms<true, 16>(LARGER_LEAF_ARGS);
      } else {
        train_data_->ConstructHistograms<true, 32>(LARGER_LEAF_ARGS);
      }
      #undef LARGER_LEAF_ARGS
    }
  } else {
    hist_t* ptr_smaller_leaf_hist_data =
        smaller_leaf_histogram_array_[0].RawData() - kHistOffset;
    train_data_->ConstructHistograms<false, 0>(
        is_feature_used, smaller_leaf_splits_->data_indices(),
        smaller_leaf_splits_->num_data_in_leaf(), gradients_, hessians_,
458
        ordered_gradients_.data(), ordered_hessians_.data(), share_state_.get(),
459
460
461
462
463
464
465
466
467
468
469
        ptr_smaller_leaf_hist_data);
    if (larger_leaf_histogram_array_ != nullptr && !use_subtract) {
      // construct larger leaf
      hist_t* ptr_larger_leaf_hist_data =
          larger_leaf_histogram_array_[0].RawData() - kHistOffset;
      train_data_->ConstructHistograms<false, 0>(
          is_feature_used, larger_leaf_splits_->data_indices(),
          larger_leaf_splits_->num_data_in_leaf(), gradients_, hessians_,
          ordered_gradients_.data(), ordered_hessians_.data(), share_state_.get(),
          ptr_larger_leaf_hist_data);
    }
Guolin Ke's avatar
Guolin Ke committed
470
  }
471
472
}

Guolin Ke's avatar
Guolin Ke committed
473
void SerialTreeLearner::FindBestSplitsFromHistograms(
474
    const std::vector<int8_t>& is_feature_used, bool use_subtract, const Tree* tree) {
Guolin Ke's avatar
Guolin Ke committed
475
476
  Common::FunctionTimer fun_timer(
      "SerialTreeLearner::FindBestSplitsFromHistograms", global_timer);
477
478
  std::vector<SplitInfo> smaller_best(share_state_->num_threads);
  std::vector<SplitInfo> larger_best(share_state_->num_threads);
479
480
  std::vector<int8_t> smaller_node_used_features = col_sampler_.GetByNode(tree, smaller_leaf_splits_->leaf_index());
  std::vector<int8_t> larger_node_used_features;
481
482
483
484
485
  double smaller_leaf_parent_output = GetParentOutput(tree, smaller_leaf_splits_.get());
  double larger_leaf_parent_output = 0;
  if (larger_leaf_splits_ != nullptr && larger_leaf_splits_->leaf_index() >= 0) {
    larger_leaf_parent_output = GetParentOutput(tree, larger_leaf_splits_.get());
  }
486
487
488
  if (larger_leaf_splits_->leaf_index() >= 0) {
    larger_node_used_features = col_sampler_.GetByNode(tree, larger_leaf_splits_->leaf_index());
  }
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508

  if (use_subtract && config_->use_quantized_grad) {
    const int parent_index = std::min(smaller_leaf_splits_->leaf_index(), larger_leaf_splits_->leaf_index());
    const uint8_t parent_hist_bits = gradient_discretizer_->GetHistBitsInNode<false>(parent_index);
    const uint8_t larger_hist_bits = gradient_discretizer_->GetHistBitsInLeaf<false>(larger_leaf_splits_->leaf_index());
    if (parent_hist_bits > 16 && larger_hist_bits <= 16) {
      OMP_INIT_EX();
      #pragma omp parallel for schedule(static) num_threads(share_state_->num_threads)
      for (int feature_index = 0; feature_index < num_features_; ++feature_index) {
        OMP_LOOP_EX_BEGIN();
        if (!is_feature_used[feature_index]) {
          continue;
        }
        larger_leaf_histogram_array_[feature_index].CopyToBuffer(gradient_discretizer_->GetChangeHistBitsBuffer(feature_index));
        OMP_LOOP_EX_END();
      }
      OMP_THROW_EX();
    }
  }

509
  OMP_INIT_EX();
Guolin Ke's avatar
Guolin Ke committed
510
// find splits
511
#pragma omp parallel for schedule(static) num_threads(share_state_->num_threads)
Guolin Ke's avatar
Guolin Ke committed
512
  for (int feature_index = 0; feature_index < num_features_; ++feature_index) {
513
    OMP_LOOP_EX_BEGIN();
Guolin Ke's avatar
Guolin Ke committed
514
515
516
    if (!is_feature_used[feature_index]) {
      continue;
    }
Guolin Ke's avatar
Guolin Ke committed
517
    const int tid = omp_get_thread_num();
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
    if (config_->use_quantized_grad) {
      const uint8_t hist_bits_bin = gradient_discretizer_->GetHistBitsInLeaf<false>(smaller_leaf_splits_->leaf_index());
      const int64_t int_sum_gradient_and_hessian = smaller_leaf_splits_->int_sum_gradients_and_hessians();
      if (hist_bits_bin <= 16) {
        train_data_->FixHistogramInt<int32_t, int32_t, 16, 16>(
            feature_index, int_sum_gradient_and_hessian,
            reinterpret_cast<hist_t*>(smaller_leaf_histogram_array_[feature_index].RawDataInt16()));
      } else {
        train_data_->FixHistogramInt<int64_t, int64_t, 32, 32>(
            feature_index, int_sum_gradient_and_hessian,
            reinterpret_cast<hist_t*>(smaller_leaf_histogram_array_[feature_index].RawDataInt32()));
      }
    } else {
      train_data_->FixHistogram(
          feature_index, smaller_leaf_splits_->sum_gradients(),
          smaller_leaf_splits_->sum_hessians(),
          smaller_leaf_histogram_array_[feature_index].RawData());
    }
536
    int real_fidx = train_data_->RealFeatureIndex(feature_index);
537
538
539
540
541

    ComputeBestSplitForFeature(smaller_leaf_histogram_array_, feature_index,
                               real_fidx,
                               smaller_node_used_features[feature_index],
                               smaller_leaf_splits_->num_data_in_leaf(),
542
543
                               smaller_leaf_splits_.get(), &smaller_best[tid],
                               smaller_leaf_parent_output);
544

Guolin Ke's avatar
Guolin Ke committed
545
    // only has root leaf
Guolin Ke's avatar
Guolin Ke committed
546
547
548
549
    if (larger_leaf_splits_ == nullptr ||
        larger_leaf_splits_->leaf_index() < 0) {
      continue;
    }
Guolin Ke's avatar
Guolin Ke committed
550

Guolin Ke's avatar
Guolin Ke committed
551
    if (use_subtract) {
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
      if (config_->use_quantized_grad) {
        const int parent_index = std::min(smaller_leaf_splits_->leaf_index(), larger_leaf_splits_->leaf_index());
        const uint8_t parent_hist_bits = gradient_discretizer_->GetHistBitsInNode<false>(parent_index);
        const uint8_t smaller_hist_bits = gradient_discretizer_->GetHistBitsInLeaf<false>(smaller_leaf_splits_->leaf_index());
        const uint8_t larger_hist_bits = gradient_discretizer_->GetHistBitsInLeaf<false>(larger_leaf_splits_->leaf_index());
        if (parent_hist_bits <= 16) {
          CHECK_LE(smaller_hist_bits, 16);
          CHECK_LE(larger_hist_bits, 16);
          larger_leaf_histogram_array_[feature_index].Subtract<true, int32_t, int32_t, int32_t, 16, 16, 16>(
              smaller_leaf_histogram_array_[feature_index]);
        } else if (larger_hist_bits <= 16) {
          CHECK_LE(smaller_hist_bits, 16);
          larger_leaf_histogram_array_[feature_index].Subtract<true, int64_t, int32_t, int32_t, 32, 16, 16>(
              smaller_leaf_histogram_array_[feature_index], gradient_discretizer_->GetChangeHistBitsBuffer(feature_index));
        } else if (smaller_hist_bits <= 16) {
          larger_leaf_histogram_array_[feature_index].Subtract<true, int64_t, int32_t, int64_t, 32, 16, 32>(
              smaller_leaf_histogram_array_[feature_index]);
        } else {
          larger_leaf_histogram_array_[feature_index].Subtract<true, int64_t, int64_t, int64_t, 32, 32, 32>(
              smaller_leaf_histogram_array_[feature_index]);
        }
      } else {
        larger_leaf_histogram_array_[feature_index].Subtract<false>(
            smaller_leaf_histogram_array_[feature_index]);
      }
577
    } else {
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
      if (config_->use_quantized_grad) {
        const int64_t int_sum_gradient_and_hessian = larger_leaf_splits_->int_sum_gradients_and_hessians();
        const uint8_t hist_bits_bin = gradient_discretizer_->GetHistBitsInLeaf<false>(larger_leaf_splits_->leaf_index());
        if (hist_bits_bin <= 16) {
          train_data_->FixHistogramInt<int32_t, int32_t, 16, 16>(
              feature_index, int_sum_gradient_and_hessian,
              reinterpret_cast<hist_t*>(larger_leaf_histogram_array_[feature_index].RawDataInt16()));
        } else {
          train_data_->FixHistogramInt<int64_t, int64_t, 32, 32>(
              feature_index, int_sum_gradient_and_hessian,
              reinterpret_cast<hist_t*>(larger_leaf_histogram_array_[feature_index].RawDataInt32()));
        }
      } else {
        train_data_->FixHistogram(
            feature_index, larger_leaf_splits_->sum_gradients(),
            larger_leaf_splits_->sum_hessians(),
            larger_leaf_histogram_array_[feature_index].RawData());
      }
596
    }
597
598
599
600
601

    ComputeBestSplitForFeature(larger_leaf_histogram_array_, feature_index,
                               real_fidx,
                               larger_node_used_features[feature_index],
                               larger_leaf_splits_->num_data_in_leaf(),
602
603
                               larger_leaf_splits_.get(), &larger_best[tid],
                               larger_leaf_parent_output);
604

605
    OMP_LOOP_EX_END();
Guolin Ke's avatar
Guolin Ke committed
606
  }
607
  OMP_THROW_EX();
Guolin Ke's avatar
Guolin Ke committed
608
  auto smaller_best_idx = ArrayArgs<SplitInfo>::ArgMax(smaller_best);
609
  int leaf = smaller_leaf_splits_->leaf_index();
Guolin Ke's avatar
Guolin Ke committed
610
611
  best_split_per_leaf_[leaf] = smaller_best[smaller_best_idx];

Guolin Ke's avatar
Guolin Ke committed
612
613
  if (larger_leaf_splits_ != nullptr &&
      larger_leaf_splits_->leaf_index() >= 0) {
614
    leaf = larger_leaf_splits_->leaf_index();
Guolin Ke's avatar
Guolin Ke committed
615
616
617
618
619
    auto larger_best_idx = ArrayArgs<SplitInfo>::ArgMax(larger_best);
    best_split_per_leaf_[leaf] = larger_best[larger_best_idx];
  }
}

620
621
622
623
624
625
// Applies the user-supplied forced splits (``forced_split_json_``) to the tree
// before (in place of) normal best-gain-first growth.
// The JSON is walked in BFS order; for each node a split at the requested
// feature/threshold is gathered from the current histograms and applied.
// \param tree tree being grown (modified in place)
// \param left_leaf in/out: index of the left leaf of the last performed split
// \param right_leaf in/out: index of the right leaf of the last performed split
// \param cur_depth in/out: updated to the maximum leaf depth reached
// \return number of splits performed (see NOTE(review) below for the abort path)
int32_t SerialTreeLearner::ForceSplits(Tree* tree, int* left_leaf,
                                       int* right_leaf, int *cur_depth) {
  bool abort_last_forced_split = false;
  // nothing to do when no forced-split configuration was provided
  if (forced_split_json_ == nullptr) {
    return 0;
  }
  int32_t result_count = 0;
  // start at root leaf
  *left_leaf = 0;
  // queue holds (forced-split JSON node, leaf index it applies to)
  std::queue<std::pair<Json, int>> q;
  Json left = *forced_split_json_;
  Json right;
  // tracks whether the left child of the last split is the smaller leaf,
  // i.e. which of smaller_/larger_leaf_* state belongs to which child
  bool left_smaller = true;
  // leaf index -> split gathered for that leaf; entries with negative gain are removed
  std::unordered_map<int, SplitInfo> forceSplitMap;
  q.push(std::make_pair(left, *left_leaf));

  // Histogram construction require parent features.
  std::set<int> force_split_features = FindAllForceFeatures(*forced_split_json_);
  while (!q.empty()) {
    // refresh histograms for the leaves produced by the previous split
    if (BeforeFindBestSplit(tree, *left_leaf, *right_leaf)) {
      FindBestSplits(tree, &force_split_features);
    }
    // then, compute own splits
    SplitInfo left_split;
    SplitInfo right_split;

    if (!left.is_null()) {
      const int left_feature = left["feature"].int_value();
      const double left_threshold_double = left["threshold"].number_value();
      // map the user-facing feature index / real-valued threshold to the
      // dataset's inner feature index and bin threshold
      const int left_inner_feature_index = train_data_->InnerFeatureIndex(left_feature);
      const uint32_t left_threshold = train_data_->BinThreshold(
              left_inner_feature_index, left_threshold_double);
      // pick the histogram/split state that currently describes this leaf
      auto leaf_histogram_array = (left_smaller) ? smaller_leaf_histogram_array_ : larger_leaf_histogram_array_;
      auto left_leaf_splits = (left_smaller) ? smaller_leaf_splits_.get() : larger_leaf_splits_.get();
      // evaluate the forced threshold (instead of searching for the best one)
      leaf_histogram_array[left_inner_feature_index].GatherInfoForThreshold(
              left_leaf_splits->sum_gradients(),
              left_leaf_splits->sum_hessians(),
              left_threshold,
              left_leaf_splits->num_data_in_leaf(),
              left_leaf_splits->weight(),
              &left_split);
      left_split.feature = left_feature;
      forceSplitMap[*left_leaf] = left_split;
      // a negative-gain forced split is discarded; the BFS below will then abort
      if (left_split.gain < 0) {
        forceSplitMap.erase(*left_leaf);
      }
    }

    if (!right.is_null()) {
      const int right_feature = right["feature"].int_value();
      const double right_threshold_double = right["threshold"].number_value();
      const int right_inner_feature_index = train_data_->InnerFeatureIndex(right_feature);
      const uint32_t right_threshold = train_data_->BinThreshold(
              right_inner_feature_index, right_threshold_double);
      // the right leaf uses the complementary (smaller/larger) state
      auto leaf_histogram_array = (left_smaller) ? larger_leaf_histogram_array_ : smaller_leaf_histogram_array_;
      auto right_leaf_splits = (left_smaller) ? larger_leaf_splits_.get() : smaller_leaf_splits_.get();
      leaf_histogram_array[right_inner_feature_index].GatherInfoForThreshold(
        right_leaf_splits->sum_gradients(),
        right_leaf_splits->sum_hessians(),
        right_threshold,
        right_leaf_splits->num_data_in_leaf(),
        right_leaf_splits->weight(),
        &right_split);
      right_split.feature = right_feature;
      forceSplitMap[*right_leaf] = right_split;
      if (right_split.gain < 0) {
        forceSplitMap.erase(*right_leaf);
      }
    }

    std::pair<Json, int> pair = q.front();
    q.pop();
    int current_leaf = pair.second;
    // split info should exist because searching in bfs fashion - should have added from parent
    if (forceSplitMap.find(current_leaf) == forceSplitMap.end()) {
        abort_last_forced_split = true;
        break;
    }
    // perform the forced split and remember which child is smaller
    best_split_per_leaf_[current_leaf] = forceSplitMap[current_leaf];
    Split(tree, current_leaf, left_leaf, right_leaf);
    left_smaller = best_split_per_leaf_[current_leaf].left_count <
                   best_split_per_leaf_[current_leaf].right_count;
    // enqueue children that themselves specify a forced split
    left = Json();
    right = Json();
    if ((pair.first).object_items().count("left") > 0) {
      left = (pair.first)["left"];
      if (left.object_items().count("feature") > 0 && left.object_items().count("threshold") > 0) {
        q.push(std::make_pair(left, *left_leaf));
      }
    }
    if ((pair.first).object_items().count("right") > 0) {
      right = (pair.first)["right"];
      if (right.object_items().count("feature") > 0 && right.object_items().count("threshold") > 0) {
        q.push(std::make_pair(right, *right_leaf));
      }
    }
    result_count++;
    *(cur_depth) = std::max(*(cur_depth), tree->leaf_depth(*left_leaf));
  }
  // a forced split was rejected (negative gain): fall back to one ordinary
  // best-gain split before returning
  if (abort_last_forced_split) {
    int best_leaf =
        static_cast<int>(ArrayArgs<SplitInfo>::ArgMax(best_split_per_leaf_));
    const SplitInfo& best_leaf_SplitInfo = best_split_per_leaf_[best_leaf];
    if (best_leaf_SplitInfo.gain <= 0.0) {
      Log::Warning("No further splits with positive gain, best gain: %f",
                   best_leaf_SplitInfo.gain);
      // NOTE(review): returns num_leaves rather than result_count here,
      // presumably so the caller's growth loop stops immediately — confirm
      // against the caller's use of the return value.
      return config_->num_leaves;
    }
    Split(tree, best_leaf, left_leaf, right_leaf);
    *(cur_depth) = std::max(*(cur_depth), tree->leaf_depth(*left_leaf));
    ++result_count;
  }
  return result_count;
}
Guolin Ke's avatar
Guolin Ke committed
735

736
737
// Collects the inner feature indices used anywhere in the forced-split JSON
// tree, so their histograms can be constructed up front.
// \param force_split_leaf_setting root node of the forced-split JSON
// \return set of inner feature indices referenced by the forced splits
std::set<int> SerialTreeLearner::FindAllForceFeatures(Json force_split_leaf_setting) {
  std::set<int> result;
  std::queue<Json> pending;
  pending.push(force_split_leaf_setting);

  // breadth-first walk of the forced-split tree
  while (!pending.empty()) {
    const Json node = pending.front();
    pending.pop();

    // record this node's feature, translated to the dataset's inner index
    const int raw_feature = node["feature"].int_value();
    result.insert(train_data_->InnerFeatureIndex(raw_feature));

    // enqueue whichever children are present
    for (const char* child_key : {"left", "right"}) {
      if (node.object_items().count(child_key) > 0) {
        pending.push(node[child_key]);
      }
    }
  }

  return result;
}

762
763
764
// Performs the split recorded in best_split_per_leaf_[best_leaf]:
// partitions the data, adds the split to the tree, re-initializes the
// smaller/larger leaf states for the next iteration, and updates monotone
// constraints.
// \param tree tree being grown (modified in place)
// \param best_leaf leaf to split
// \param left_leaf out: index of the resulting left leaf (== best_leaf)
// \param right_leaf out: index of the resulting right leaf (new leaf)
// \param update_cnt whether to refresh left/right counts from the data
//        partition (skipped in the data-parallel learner)
void SerialTreeLearner::SplitInner(Tree* tree, int best_leaf, int* left_leaf,
                                   int* right_leaf, bool update_cnt) {
  Common::FunctionTimer fun_timer("SerialTreeLearner::SplitInner", global_timer);
  SplitInfo& best_split_info = best_split_per_leaf_[best_leaf];
  const int inner_feature_index =
      train_data_->InnerFeatureIndex(best_split_info.feature);
  if (cegb_ != nullptr) {
    // cost-effective gradient boosting bookkeeping for the chosen split
    cegb_->UpdateLeafBestSplits(tree, best_leaf, &best_split_info,
                                &best_split_per_leaf_);
  }
  // the left child keeps the parent's leaf index
  *left_leaf = best_leaf;
  auto next_leaf_id = tree->NextLeafId();

  // update before tree split
  constraints_->BeforeSplit(best_leaf, next_leaf_id,
                            best_split_info.monotone_type);

  bool is_numerical_split =
      train_data_->FeatureBinMapper(inner_feature_index)->bin_type() ==
      BinType::NumericalBin;
  if (is_numerical_split) {
    // map bin threshold back to a real-valued threshold for the tree model
    auto threshold_double = train_data_->RealThreshold(
        inner_feature_index, best_split_info.threshold);
    data_partition_->Split(best_leaf, train_data_, inner_feature_index,
                           &best_split_info.threshold, 1,
                           best_split_info.default_left, next_leaf_id);
    if (update_cnt) {
      // don't need to update this in data-based parallel model
      best_split_info.left_count = data_partition_->leaf_count(*left_leaf);
      best_split_info.right_count = data_partition_->leaf_count(next_leaf_id);
    }
    // split tree, will return right leaf
    *right_leaf = tree->Split(
        best_leaf, inner_feature_index, best_split_info.feature,
        best_split_info.threshold, threshold_double,
        static_cast<double>(best_split_info.left_output),
        static_cast<double>(best_split_info.right_output),
        static_cast<data_size_t>(best_split_info.left_count),
        static_cast<data_size_t>(best_split_info.right_count),
        static_cast<double>(best_split_info.left_sum_hessian),
        static_cast<double>(best_split_info.right_sum_hessian),
        // store the true split gain in tree model
        static_cast<float>(best_split_info.gain + config_->min_gain_to_split),
        train_data_->FeatureBinMapper(inner_feature_index)->missing_type(),
        best_split_info.default_left);
  } else {
    // categorical split: thresholds are a set of category bins, stored as a bitset
    std::vector<uint32_t> cat_bitset_inner =
        Common::ConstructBitset(best_split_info.cat_threshold.data(),
                                best_split_info.num_cat_threshold);
    // also build the bitset over the original (real) category values
    std::vector<int> threshold_int(best_split_info.num_cat_threshold);
    for (int i = 0; i < best_split_info.num_cat_threshold; ++i) {
      threshold_int[i] = static_cast<int>(train_data_->RealThreshold(
          inner_feature_index, best_split_info.cat_threshold[i]));
    }
    std::vector<uint32_t> cat_bitset = Common::ConstructBitset(
        threshold_int.data(), best_split_info.num_cat_threshold);

    data_partition_->Split(best_leaf, train_data_, inner_feature_index,
                           cat_bitset_inner.data(),
                           static_cast<int>(cat_bitset_inner.size()),
                           best_split_info.default_left, next_leaf_id);

    if (update_cnt) {
      // don't need to update this in data-based parallel model
      best_split_info.left_count = data_partition_->leaf_count(*left_leaf);
      best_split_info.right_count = data_partition_->leaf_count(next_leaf_id);
    }

    *right_leaf = tree->SplitCategorical(
        best_leaf, inner_feature_index, best_split_info.feature,
        cat_bitset_inner.data(), static_cast<int>(cat_bitset_inner.size()),
        cat_bitset.data(), static_cast<int>(cat_bitset.size()),
        static_cast<double>(best_split_info.left_output),
        static_cast<double>(best_split_info.right_output),
        static_cast<data_size_t>(best_split_info.left_count),
        static_cast<data_size_t>(best_split_info.right_count),
        static_cast<double>(best_split_info.left_sum_hessian),
        static_cast<double>(best_split_info.right_sum_hessian),
        // store the true split gain in tree model
        static_cast<float>(best_split_info.gain + config_->min_gain_to_split),
        train_data_->FeatureBinMapper(inner_feature_index)->missing_type());
  }

#ifdef DEBUG
  CHECK(*right_leaf == next_leaf_id);
#endif

  // init the leaves that used on next iteration
  // the smaller child becomes smaller_leaf_splits_ so that its histogram can
  // be built directly and the larger child's obtained by subtraction
  if (!config_->use_quantized_grad) {
    if (best_split_info.left_count < best_split_info.right_count) {
      CHECK_GT(best_split_info.left_count, 0);
      smaller_leaf_splits_->Init(*left_leaf, data_partition_.get(),
                                 best_split_info.left_sum_gradient,
                                 best_split_info.left_sum_hessian,
                                 best_split_info.left_output);
      larger_leaf_splits_->Init(*right_leaf, data_partition_.get(),
                                best_split_info.right_sum_gradient,
                                best_split_info.right_sum_hessian,
                                best_split_info.right_output);
    } else {
      CHECK_GT(best_split_info.right_count, 0);
      smaller_leaf_splits_->Init(*right_leaf, data_partition_.get(),
                                 best_split_info.right_sum_gradient,
                                 best_split_info.right_sum_hessian,
                                 best_split_info.right_output);
      larger_leaf_splits_->Init(*left_leaf, data_partition_.get(),
                                best_split_info.left_sum_gradient,
                                best_split_info.left_sum_hessian,
                                best_split_info.left_output);
    }
  } else {
    // quantized-gradient mode additionally carries the packed integer
    // gradient/hessian sums into the leaf-split state
    if (best_split_info.left_count < best_split_info.right_count) {
      CHECK_GT(best_split_info.left_count, 0);
      smaller_leaf_splits_->Init(*left_leaf, data_partition_.get(),
                                 best_split_info.left_sum_gradient,
                                 best_split_info.left_sum_hessian,
                                 best_split_info.left_sum_gradient_and_hessian,
                                 best_split_info.left_output);
      larger_leaf_splits_->Init(*right_leaf, data_partition_.get(),
                                 best_split_info.right_sum_gradient,
                                 best_split_info.right_sum_hessian,
                                 best_split_info.right_sum_gradient_and_hessian,
                                 best_split_info.right_output);
    } else {
      CHECK_GT(best_split_info.right_count, 0);
      smaller_leaf_splits_->Init(*right_leaf, data_partition_.get(),
                                 best_split_info.right_sum_gradient,
                                 best_split_info.right_sum_hessian,
                                 best_split_info.right_sum_gradient_and_hessian,
                                 best_split_info.right_output);
      larger_leaf_splits_->Init(*left_leaf, data_partition_.get(),
                                best_split_info.left_sum_gradient,
                                best_split_info.left_sum_hessian,
                                best_split_info.left_sum_gradient_and_hessian,
                                best_split_info.left_output);
    }
  }
  // choose histogram bit widths for the two new leaves (not in data-parallel mode)
  if (config_->use_quantized_grad && config_->tree_learner != std::string("data")) {
    gradient_discretizer_->SetNumBitsInHistogramBin<false>(*left_leaf, *right_leaf,
                                                    data_partition_->leaf_count(*left_leaf),
                                                    data_partition_->leaf_count(*right_leaf));
  }

  #ifdef DEBUG
  CheckSplit(best_split_info, *left_leaf, *right_leaf);
  #endif

  auto leaves_need_update = constraints_->Update(
      is_numerical_split, *left_leaf, *right_leaf,
      best_split_info.monotone_type, best_split_info.right_output,
      best_split_info.left_output, inner_feature_index, best_split_info,
      best_split_per_leaf_);
  // update leave outputs if needed
  for (auto leaf : leaves_need_update) {
    RecomputeBestSplitForLeaf(tree, leaf, &best_split_per_leaf_[leaf]);
  }
}
Guolin Ke's avatar
Guolin Ke committed
919

920
// Lets the objective function renew (re-fit) each leaf's output value after
// the tree structure is fixed, with distributed-training support.
// \param tree tree whose leaf outputs are renewed in place
// \param obj objective function; only acts when obj->IsRenewTreeOutput() is true
// \param residual_getter callback mapping (labels, data index) to a residual
// \param total_num_data total number of rows in the full training set
// \param bag_indices mapping from bagged index to original index (used only
//        when bagging is active, i.e. total_num_data != num_data_)
// \param bag_cnt number of bagged rows (must equal num_data_ when bagging)
void SerialTreeLearner::RenewTreeOutput(Tree* tree, const ObjectiveFunction* obj, std::function<double(const label_t*, int)> residual_getter,
                                        data_size_t total_num_data, const data_size_t* bag_indices, data_size_t bag_cnt, const double* /*train_score*/) const {
  if (obj != nullptr && obj->IsRenewTreeOutput()) {
    CHECK_LE(tree->num_leaves(), data_partition_->num_leaves());
    // when bagging is active, indices in the partition are bagged indices and
    // must be mapped back to original row indices
    const data_size_t* bag_mapper = nullptr;
    if (total_num_data != num_data_) {
      CHECK_EQ(bag_cnt, num_data_);
      bag_mapper = bag_indices;
    }
    // tracks, per leaf, whether this worker has any data in the leaf
    // (used to average correctly across machines below)
    std::vector<int> n_nozeroworker_perleaf(tree->num_leaves(), 1);
    int num_machines = Network::num_machines();
    #pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(static)
    for (int i = 0; i < tree->num_leaves(); ++i) {
      const double output = static_cast<double>(tree->LeafOutput(i));
      data_size_t cnt_leaf_data = 0;
      auto index_mapper = data_partition_->GetIndexOnLeaf(i, &cnt_leaf_data);
      if (cnt_leaf_data > 0) {
        // bag_mapper[index_mapper[i]]
        const double new_output = obj->RenewTreeOutput(output, residual_getter, index_mapper, bag_mapper, cnt_leaf_data);
        tree->SetLeafOutput(i, new_output);
      } else {
        // a leaf with no local data can only happen in distributed training
        CHECK_GT(num_machines, 1);
        tree->SetLeafOutput(i, 0.0);
        n_nozeroworker_perleaf[i] = 0;
      }
    }
    if (num_machines > 1) {
      // average each leaf's renewed output over the workers that had data in it
      std::vector<double> outputs(tree->num_leaves());
      for (int i = 0; i < tree->num_leaves(); ++i) {
        outputs[i] = static_cast<double>(tree->LeafOutput(i));
      }
      outputs = Network::GlobalSum(&outputs);
      n_nozeroworker_perleaf = Network::GlobalSum(&n_nozeroworker_perleaf);
      for (int i = 0; i < tree->num_leaves(); ++i) {
        tree->SetLeafOutput(i, outputs[i] / n_nozeroworker_perleaf[i]);
      }
    }
  }
}

960
961
void SerialTreeLearner::ComputeBestSplitForFeature(
    FeatureHistogram* histogram_array_, int feature_index, int real_fidx,
Guolin Ke's avatar
Guolin Ke committed
962
    int8_t is_feature_used, int num_data, const LeafSplits* leaf_splits,
963
    SplitInfo* best_split, double parent_output) {
964
965
966
967
968
969
970
  bool is_feature_numerical = train_data_->FeatureBinMapper(feature_index)
                                  ->bin_type() == BinType::NumericalBin;
  if (is_feature_numerical & !config_->monotone_constraints.empty()) {
    constraints_->RecomputeConstraintsIfNeeded(
        constraints_.get(), feature_index, ~(leaf_splits->leaf_index()),
        train_data_->FeatureNumBin(feature_index));
  }
971
  SplitInfo new_split;
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
  if (config_->use_quantized_grad) {
    const uint8_t hist_bits_bin = gradient_discretizer_->GetHistBitsInLeaf<false>(leaf_splits->leaf_index());
    histogram_array_[feature_index].FindBestThresholdInt(
        leaf_splits->int_sum_gradients_and_hessians(),
        gradient_discretizer_->grad_scale(),
        gradient_discretizer_->hess_scale(),
        hist_bits_bin,
        hist_bits_bin,
        num_data,
        constraints_->GetFeatureConstraint(leaf_splits->leaf_index(), feature_index), parent_output, &new_split);
  } else {
    histogram_array_[feature_index].FindBestThreshold(
        leaf_splits->sum_gradients(), leaf_splits->sum_hessians(), num_data,
        constraints_->GetFeatureConstraint(leaf_splits->leaf_index(), feature_index), parent_output, &new_split);
  }
987
988
989
  new_split.feature = real_fidx;
  if (cegb_ != nullptr) {
    new_split.gain -=
990
        cegb_->DeltaGain(feature_index, real_fidx, leaf_splits->leaf_index(),
991
992
                         num_data, new_split);
  }
993
994
995
996
997
  if (new_split.monotone_type != 0) {
    double penalty = constraints_->ComputeMonotoneSplitGainPenalty(
        leaf_splits->leaf_index(), config_->monotone_penalty);
    new_split.gain *= penalty;
  }
Guolin Ke's avatar
Guolin Ke committed
998
999
1000
  // it is needed to filter the features after the above code.
  // Otherwise, the `is_splittable` in `FeatureHistogram` will be wrong, and cause some features being accidentally filtered in the later nodes.
  if (new_split > *best_split && is_feature_used) {
1001
1002
1003
1004
    *best_split = new_split;
  }
}

1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
// Returns the output value to use as the "parent output" for path smoothing
// when evaluating splits of the given leaf.
// \param tree current tree (only its leaf count is inspected)
// \param leaf_splits statistics of the leaf being split
// \return the parent's weight for non-root leaves; for the root, its own
//         optimal output (no smoothing is applied to the root)
double SerialTreeLearner::GetParentOutput(const Tree* tree, const LeafSplits* leaf_splits) const {
  // non-root leaf: smoothing uses the weight stored on the leaf itself
  if (tree->num_leaves() != 1) {
    return leaf_splits->weight();
  }
  // root leaf: the "parent" output is its own output because no smoothing is
  // applied to the root
  return FeatureHistogram::CalculateSplittedLeafOutput<true, true, true, false>(
      leaf_splits->sum_gradients(), leaf_splits->sum_hessians(),
      config_->lambda_l1, config_->lambda_l2, config_->max_delta_step,
      BasicConstraint(), config_->path_smooth,
      static_cast<data_size_t>(leaf_splits->num_data_in_leaf()), 0);
}

1019
// Recomputes the best split of an existing leaf from its cached histograms.
// Called when monotone-constraint updates invalidate a previously stored
// best split. If the leaf's histograms are no longer in the pool, the stale
// split is left unchanged (with a warning).
// \param tree current tree (used for per-node column sampling)
// \param leaf leaf index whose best split is recomputed
// \param split in/out: previous best split (provides the leaf's aggregate
//        sums); overwritten with the recomputed best split on success
void SerialTreeLearner::RecomputeBestSplitForLeaf(Tree* tree, int leaf, SplitInfo* split) {
  FeatureHistogram* histogram_array_;
  if (!histogram_pool_.Get(leaf, &histogram_array_)) {
    Log::Warning(
        "Get historical Histogram for leaf %d failed, will skip the "
        "``RecomputeBestSplitForLeaf``",
        leaf);
    return;
  }
  // reconstruct the leaf's aggregate statistics from the stored split's halves
  double sum_gradients = split->left_sum_gradient + split->right_sum_gradient;
  double sum_hessians = split->left_sum_hessian + split->right_sum_hessian;
  int num_data = split->left_count + split->right_count;

  // per-thread best candidates, reduced by ArgMax at the end
  std::vector<SplitInfo> bests(share_state_->num_threads);
  LeafSplits leaf_splits(num_data, config_);
  leaf_splits.Init(leaf, sum_gradients, sum_hessians);

  // can't use GetParentOutput because leaf_splits doesn't have weight property set
  double parent_output = 0;
  if (config_->path_smooth > kEpsilon) {
    parent_output = FeatureHistogram::CalculateSplittedLeafOutput<true, true, true, false>(
      sum_gradients, sum_hessians, config_->lambda_l1, config_->lambda_l2, config_->max_delta_step,
      BasicConstraint(), config_->path_smooth, static_cast<data_size_t>(num_data), 0);
  }

  OMP_INIT_EX();
// find splits
std::vector<int8_t> node_used_features = col_sampler_.GetByNode(tree, leaf);
#pragma omp parallel for schedule(static) num_threads(share_state_->num_threads)
  for (int feature_index = 0; feature_index < num_features_; ++feature_index) {
    OMP_LOOP_EX_BEGIN();
    // skip features dropped by per-tree column sampling or already known
    // unsplittable from the histogram
    if (!col_sampler_.is_feature_used_bytree()[feature_index] ||
        !histogram_array_[feature_index].is_splittable()) {
      continue;
    }
    const int tid = omp_get_thread_num();
    int real_fidx = train_data_->RealFeatureIndex(feature_index);
    ComputeBestSplitForFeature(histogram_array_, feature_index, real_fidx, node_used_features[feature_index],
                               num_data, &leaf_splits, &bests[tid], parent_output);

    OMP_LOOP_EX_END();
  }
  OMP_THROW_EX();
  auto best_idx = ArrayArgs<SplitInfo>::ArgMax(bests);
  *split = bests[best_idx];
}

1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
#ifdef DEBUG
// Debug-only consistency check for a just-performed split in quantized-
// gradient mode: recomputes the integer gradient/hessian sums of both
// children directly from the per-row discretized values and compares them
// (and the row counts) against what the chosen SplitInfo claims.
// The packed int8 buffer stores pairs per row: hessian at 2*index,
// gradient at 2*index + 1, matching the (gradient << 32 | hessian) packing
// of the 64-bit sums.
void SerialTreeLearner::CheckSplit(const SplitInfo& best_split_info, const int left_leaf_index, const int right_leaf_index) {
  data_size_t num_data_in_left = 0;
  data_size_t num_data_in_right = 0;
  const data_size_t* data_indices_in_left = data_partition_->GetIndexOnLeaf(left_leaf_index, &num_data_in_left);
  const data_size_t* data_indices_in_right = data_partition_->GetIndexOnLeaf(right_leaf_index, &num_data_in_right);
  if (config_->use_quantized_grad) {
    int32_t sum_left_gradient = 0;
    int32_t sum_left_hessian = 0;
    int32_t sum_right_gradient = 0;
    int32_t sum_right_hessian = 0;
    const int8_t* discretized_grad_and_hess = gradient_discretizer_->discretized_gradients_and_hessians();
    for (data_size_t i = 0; i < num_data_in_left; ++i) {
      const data_size_t index = data_indices_in_left[i];
      sum_left_gradient += discretized_grad_and_hess[2 * index + 1];
      sum_left_hessian += discretized_grad_and_hess[2 * index];
    }
    for (data_size_t i = 0; i < num_data_in_right; ++i) {
      const data_size_t index = data_indices_in_right[i];
      sum_right_gradient += discretized_grad_and_hess[2 * index + 1];
      sum_right_hessian += discretized_grad_and_hess[2 * index];
    }
    Log::Warning("============================ start leaf split info ============================");
    Log::Warning("left_leaf_index = %d, right_leaf_index = %d", left_leaf_index, right_leaf_index);
    Log::Warning("num_data_in_left = %d, num_data_in_right = %d", num_data_in_left, num_data_in_right);
    Log::Warning("sum_left_gradient = %d, best_split_info->left_sum_gradient_and_hessian.gradient = %d", sum_left_gradient,
      static_cast<int32_t>(best_split_info.left_sum_gradient_and_hessian >> 32));
    Log::Warning("sum_left_hessian = %d, best_split_info->left_sum_gradient_and_hessian.hessian = %d", sum_left_hessian,
      static_cast<int32_t>(best_split_info.left_sum_gradient_and_hessian & 0x00000000ffffffff));
    Log::Warning("sum_right_gradient = %d, best_split_info->right_sum_gradient_and_hessian.gradient = %d", sum_right_gradient,
      static_cast<int32_t>(best_split_info.right_sum_gradient_and_hessian >> 32));
    Log::Warning("sum_right_hessian = %d, best_split_info->right_sum_gradient_and_hessian.hessian = %d", sum_right_hessian,
      static_cast<int32_t>(best_split_info.right_sum_gradient_and_hessian & 0x00000000ffffffff));
    CHECK_EQ(num_data_in_left, best_split_info.left_count);
    CHECK_EQ(num_data_in_right, best_split_info.right_count);
    // fixed: this CHECK_EQ was missing its terminating semicolon, unlike its
    // three siblings below (it only compiled by accident of the macro body)
    CHECK_EQ(sum_left_gradient, static_cast<int32_t>(best_split_info.left_sum_gradient_and_hessian >> 32));
    CHECK_EQ(sum_left_hessian, static_cast<int32_t>(best_split_info.left_sum_gradient_and_hessian & 0x00000000ffffffff));
    CHECK_EQ(sum_right_gradient, static_cast<int32_t>(best_split_info.right_sum_gradient_and_hessian >> 32));
    CHECK_EQ(sum_right_hessian, static_cast<int32_t>(best_split_info.right_sum_gradient_and_hessian & 0x00000000ffffffff));
    Log::Warning("============================ end leaf split info ============================");
  }
}
#endif

Guolin Ke's avatar
Guolin Ke committed
1110
}  // namespace LightGBM