/*!
 * Copyright (c) 2016 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for license information.
 */
#ifndef LIGHTGBM_TREELEARNER_PARALLEL_TREE_LEARNER_H_
#define LIGHTGBM_TREELEARNER_PARALLEL_TREE_LEARNER_H_

#include <LightGBM/network.h>
#include <LightGBM/utils/array_args.h>

#include <cstring>
#include <memory>
#include <vector>

#include "cuda_tree_learner.h"
#include "gpu_tree_learner.h"
#include "serial_tree_learner.h"

namespace LightGBM {

/*!
* \brief Feature parallel learning algorithm.
Qiwei Ye's avatar
Qiwei Ye committed
23
24
*        Different machine will find best split on different features, then sync global best split
*        It is recommonded used when #data is small or #feature is large
Guolin Ke's avatar
Guolin Ke committed
25
*/
26
27
template <typename TREELEARNER_T>
class FeatureParallelTreeLearner: public TREELEARNER_T {
Nikita Titov's avatar
Nikita Titov committed
28
 public:
Guolin Ke's avatar
Guolin Ke committed
29
  explicit FeatureParallelTreeLearner(const Config* config);
Guolin Ke's avatar
Guolin Ke committed
30
  ~FeatureParallelTreeLearner();
31
  void Init(const Dataset* train_data, bool is_constant_hessian) override;
Guolin Ke's avatar
Guolin Ke committed
32

Nikita Titov's avatar
Nikita Titov committed
33
 protected:
Guolin Ke's avatar
Guolin Ke committed
34
  void BeforeTrain() override;
35
  void FindBestSplitsFromHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract, const Tree* tree) override;
Nikita Titov's avatar
Nikita Titov committed
36
37

 private:
Guolin Ke's avatar
Guolin Ke committed
38
39
40
41
42
  /*! \brief rank of local machine */
  int rank_;
  /*! \brief Number of machines of this parallel task */
  int num_machines_;
  /*! \brief Buffer for network send */
Guolin Ke's avatar
Guolin Ke committed
43
  std::vector<char> input_buffer_;
Guolin Ke's avatar
Guolin Ke committed
44
  /*! \brief Buffer for network receive */
Guolin Ke's avatar
Guolin Ke committed
45
  std::vector<char> output_buffer_;
Guolin Ke's avatar
Guolin Ke committed
46
47
48
49
};

/*!
* \brief Data parallel learning algorithm.
Qiwei Ye's avatar
Qiwei Ye committed
50
51
*        Workers use local data to construct histograms locally, then sync up global histograms.
*        It is recommonded used when #data is large or #feature is small
Guolin Ke's avatar
Guolin Ke committed
52
*/
53
54
template <typename TREELEARNER_T>
class DataParallelTreeLearner: public TREELEARNER_T {
Nikita Titov's avatar
Nikita Titov committed
55
 public:
Guolin Ke's avatar
Guolin Ke committed
56
  explicit DataParallelTreeLearner(const Config* config);
Guolin Ke's avatar
Guolin Ke committed
57
  ~DataParallelTreeLearner();
58
  void Init(const Dataset* train_data, bool is_constant_hessian) override;
Guolin Ke's avatar
Guolin Ke committed
59
  void ResetConfig(const Config* config) override;
60

Nikita Titov's avatar
Nikita Titov committed
61
 protected:
Guolin Ke's avatar
Guolin Ke committed
62
  void BeforeTrain() override;
63
64
  void FindBestSplits(const Tree* tree) override;
  void FindBestSplitsFromHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract, const Tree* tree) override;
Guolin Ke's avatar
Guolin Ke committed
65
66
67
68
69
70
71
72
73
74
  void Split(Tree* tree, int best_Leaf, int* left_leaf, int* right_leaf) override;

  inline data_size_t GetGlobalDataCountInLeaf(int leaf_idx) const override {
    if (leaf_idx >= 0) {
      return global_data_count_in_leaf_[leaf_idx];
    } else {
      return 0;
    }
  }

Nikita Titov's avatar
Nikita Titov committed
75
 private:
Guolin Ke's avatar
Guolin Ke committed
76
77
78
79
80
  /*! \brief Rank of local machine */
  int rank_;
  /*! \brief Number of machines of this parallel task */
  int num_machines_;
  /*! \brief Buffer for network send */
Guolin Ke's avatar
Guolin Ke committed
81
  std::vector<char> input_buffer_;
Guolin Ke's avatar
Guolin Ke committed
82
  /*! \brief Buffer for network receive */
Guolin Ke's avatar
Guolin Ke committed
83
  std::vector<char> output_buffer_;
Guolin Ke's avatar
Guolin Ke committed
84
85
  /*! \brief different machines will aggregate histograms for different features,
       use this to mark local aggregate features*/
Guolin Ke's avatar
Guolin Ke committed
86
  std::vector<bool> is_feature_aggregated_;
Guolin Ke's avatar
Guolin Ke committed
87
  /*! \brief Block start index for reduce scatter */
Guolin Ke's avatar
Guolin Ke committed
88
  std::vector<comm_size_t> block_start_;
Guolin Ke's avatar
Guolin Ke committed
89
  /*! \brief Block size for reduce scatter */
Guolin Ke's avatar
Guolin Ke committed
90
  std::vector<comm_size_t> block_len_;
Hui Xue's avatar
Hui Xue committed
91
  /*! \brief Write positions for feature histograms */
Guolin Ke's avatar
Guolin Ke committed
92
  std::vector<comm_size_t> buffer_write_start_pos_;
Hui Xue's avatar
Hui Xue committed
93
  /*! \brief Read positions for local feature histograms */
Guolin Ke's avatar
Guolin Ke committed
94
  std::vector<comm_size_t> buffer_read_start_pos_;
Guolin Ke's avatar
Guolin Ke committed
95
  /*! \brief Size for reduce scatter */
Guolin Ke's avatar
Guolin Ke committed
96
  comm_size_t reduce_scatter_size_;
Guolin Ke's avatar
Guolin Ke committed
97
  /*! \brief Store global number of data in leaves  */
Guolin Ke's avatar
Guolin Ke committed
98
  std::vector<data_size_t> global_data_count_in_leaf_;
Guolin Ke's avatar
Guolin Ke committed
99
100
};

/*!
* \brief Voting based data parallel learning algorithm.
* Like data parallel, but not aggregate histograms for all features.
* Here using voting to reduce features, and only aggregate histograms for selected features.
* When #data is large and #feature is large, you can use this to have better speed-up
*/
107
108
template <typename TREELEARNER_T>
class VotingParallelTreeLearner: public TREELEARNER_T {
Nikita Titov's avatar
Nikita Titov committed
109
 public:
Guolin Ke's avatar
Guolin Ke committed
110
  explicit VotingParallelTreeLearner(const Config* config);
Guolin Ke's avatar
Guolin Ke committed
111
  ~VotingParallelTreeLearner() { }
112
  void Init(const Dataset* train_data, bool is_constant_hessian) override;
Guolin Ke's avatar
Guolin Ke committed
113
  void ResetConfig(const Config* config) override;
114

Nikita Titov's avatar
Nikita Titov committed
115
 protected:
Guolin Ke's avatar
Guolin Ke committed
116
  void BeforeTrain() override;
Guolin Ke's avatar
Guolin Ke committed
117
  bool BeforeFindBestSplit(const Tree* tree, int left_leaf, int right_leaf) override;
118
119
  void FindBestSplits(const Tree* tree) override;
  void FindBestSplitsFromHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract, const Tree* tree) override;
Guolin Ke's avatar
Guolin Ke committed
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
  void Split(Tree* tree, int best_Leaf, int* left_leaf, int* right_leaf) override;

  inline data_size_t GetGlobalDataCountInLeaf(int leaf_idx) const override {
    if (leaf_idx >= 0) {
      return global_data_count_in_leaf_[leaf_idx];
    } else {
      return 0;
    }
  }
  /*!
  * \brief Perform global voting
  * \param leaf_idx index of leaf
  * \param splits All splits from local voting
  * \param out Result of gobal voting, only store feature indices
  */
135
  void GlobalVoting(int leaf_idx, const std::vector<LightSplitInfo>& splits,
Guolin Ke's avatar
Guolin Ke committed
136
137
138
139
140
141
142
143
144
    std::vector<int>* out);
  /*!
  * \brief Copy local histgram to buffer
  * \param smaller_top_features Selected features for smaller leaf
  * \param larger_top_features Selected features for larger leaf
  */
  void CopyLocalHistogram(const std::vector<int>& smaller_top_features,
    const std::vector<int>& larger_top_features);

Nikita Titov's avatar
Nikita Titov committed
145
 private:
Guolin Ke's avatar
Guolin Ke committed
146
  /*! \brief Tree config used in local mode */
Guolin Ke's avatar
Guolin Ke committed
147
  Config local_config_;
Guolin Ke's avatar
Guolin Ke committed
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
  /*! \brief Voting size */
  int top_k_;
  /*! \brief Rank of local machine*/
  int rank_;
  /*! \brief Number of machines */
  int num_machines_;
  /*! \brief Buffer for network send */
  std::vector<char> input_buffer_;
  /*! \brief Buffer for network receive */
  std::vector<char> output_buffer_;
  /*! \brief different machines will aggregate histograms for different features,
  use this to mark local aggregate features*/
  std::vector<bool> smaller_is_feature_aggregated_;
  /*! \brief different machines will aggregate histograms for different features,
  use this to mark local aggregate features*/
  std::vector<bool> larger_is_feature_aggregated_;
  /*! \brief Block start index for reduce scatter */
Guolin Ke's avatar
Guolin Ke committed
165
  std::vector<comm_size_t> block_start_;
Guolin Ke's avatar
Guolin Ke committed
166
  /*! \brief Block size for reduce scatter */
Guolin Ke's avatar
Guolin Ke committed
167
  std::vector<comm_size_t> block_len_;
Guolin Ke's avatar
Guolin Ke committed
168
  /*! \brief Read positions for feature histgrams at smaller leaf */
Guolin Ke's avatar
Guolin Ke committed
169
  std::vector<comm_size_t> smaller_buffer_read_start_pos_;
Guolin Ke's avatar
Guolin Ke committed
170
  /*! \brief Read positions for feature histgrams at larger leaf */
Guolin Ke's avatar
Guolin Ke committed
171
  std::vector<comm_size_t> larger_buffer_read_start_pos_;
Guolin Ke's avatar
Guolin Ke committed
172
  /*! \brief Size for reduce scatter */
Guolin Ke's avatar
Guolin Ke committed
173
  comm_size_t reduce_scatter_size_;
Guolin Ke's avatar
Guolin Ke committed
174
175
176
177
178
179
180
181
182
183
  /*! \brief Store global number of data in leaves  */
  std::vector<data_size_t> global_data_count_in_leaf_;
  /*! \brief Store global split information for smaller leaf  */
  std::unique_ptr<LeafSplits> smaller_leaf_splits_global_;
  /*! \brief Store global split information for larger leaf  */
  std::unique_ptr<LeafSplits> larger_leaf_splits_global_;
  /*! \brief Store global histogram for smaller leaf  */
  std::unique_ptr<FeatureHistogram[]> smaller_leaf_histogram_array_global_;
  /*! \brief Store global histogram for larger leaf  */
  std::unique_ptr<FeatureHistogram[]> larger_leaf_histogram_array_global_;
Guolin Ke's avatar
Guolin Ke committed
184

185
186
  std::vector<hist_t> smaller_leaf_histogram_data_;
  std::vector<hist_t> larger_leaf_histogram_data_;
Guolin Ke's avatar
Guolin Ke committed
187
  std::vector<FeatureMetainfo> feature_metas_;
Guolin Ke's avatar
Guolin Ke committed
188
};
// To-do: reduce the communication cost by using bitset to communicate.
191
inline void SyncUpGlobalBestSplit(char* input_buffer_, char* output_buffer_, SplitInfo* smaller_best_split, SplitInfo* larger_best_split, int max_cat_threshold) {
192
  // sync global best info
193
  int size = SplitInfo::Size(max_cat_threshold);
194
195
  smaller_best_split->CopyTo(input_buffer_);
  larger_best_split->CopyTo(input_buffer_ + size);
196
  Network::Allreduce(input_buffer_, size * 2, size, output_buffer_,
Guolin Ke's avatar
Guolin Ke committed
197
198
                     [] (const char* src, char* dst, int size, comm_size_t len) {
    comm_size_t used_size = 0;
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
    LightSplitInfo p1, p2;
    while (used_size < len) {
      p1.CopyFrom(src);
      p2.CopyFrom(dst);
      if (p1 > p2) {
        std::memcpy(dst, src, size);
      }
      src += size;
      dst += size;
      used_size += size;
    }
  });
  // copy back
  smaller_best_split->CopyFrom(output_buffer_);
  larger_best_split->CopyFrom(output_buffer_ + size);
}

}  // namespace LightGBM
#endif   // LIGHTGBM_TREELEARNER_PARALLEL_TREE_LEARNER_H_