c_api.cpp 115 KB
Newer Older
1
2
3
4
/*!
 * Copyright (c) 2016 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for license information.
 */
Guolin Ke's avatar
Guolin Ke committed
5
#include <LightGBM/c_api.h>
Guolin Ke's avatar
Guolin Ke committed
6

7
#include <LightGBM/arrow.h>
Guolin Ke's avatar
Guolin Ke committed
8
9
#include <LightGBM/boosting.h>
#include <LightGBM/config.h>
10
11
12
#include <LightGBM/dataset.h>
#include <LightGBM/dataset_loader.h>
#include <LightGBM/metric.h>
13
#include <LightGBM/network.h>
14
15
#include <LightGBM/objective_function.h>
#include <LightGBM/prediction_early_stop.h>
16
#include <LightGBM/utils/byte_buffer.h>
17
18
19
20
21
#include <LightGBM/utils/common.h>
#include <LightGBM/utils/log.h>
#include <LightGBM/utils/openmp_wrapper.h>
#include <LightGBM/utils/random.h>
#include <LightGBM/utils/threading.h>
Guolin Ke's avatar
Guolin Ke committed
22

23
24
25
26
27
28
29
30
#include <string>
#include <cstdio>
#include <functional>
#include <memory>
#include <mutex>
#include <stdexcept>
#include <vector>

31
#include "application/predictor.hpp"
32
33
#include <LightGBM/utils/yamc/alternate_shared_mutex.hpp>
#include <LightGBM/utils/yamc/yamc_shared_lock.hpp>
Guolin Ke's avatar
Guolin Ke committed
34

Guolin Ke's avatar
Guolin Ke committed
35
36
namespace LightGBM {

Guolin Ke's avatar
Guolin Ke committed
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
/*!
 * \brief Record a caught C++ exception as the library's last error message.
 * \param err exception whose what() text is stored
 * \return -1, the C API's generic failure code
 */
inline int LGBM_APIHandleException(const std::exception& err) {
  LGBM_SetLastError(err.what());
  return -1;
}

/*!
 * \brief Record a caught string "exception" as the library's last error message.
 * \param err message text to store
 * \return -1, the C API's generic failure code
 */
inline int LGBM_APIHandleException(const std::string& err) {
  LGBM_SetLastError(err.c_str());
  return -1;
}

// Wrap every C API entry point body: API_BEGIN() opens a try block and
// API_END() converts any escaping exception into the -1 error return
// (recording its message via LGBM_APIHandleException) so that no C++
// exception ever crosses the C ABI boundary. Success returns 0.
#define API_BEGIN() try {
#define API_END() } \
catch(std::exception& ex) { return LGBM_APIHandleException(ex); } \
catch(std::string& ex) { return LGBM_APIHandleException(ex); } \
catch(...) { return LGBM_APIHandleException("unknown exception"); } \
return 0;

53
54
55
56
57
58
// Exclusive (writer) lock on a yamc shared_mutex, used by operations that
// mutate shared state. Note: takes the mutex by value expression (reference).
#define UNIQUE_LOCK(mtx) \
std::unique_lock<yamc::alternate::shared_mutex> lock(mtx);

// Shared (reader) lock, used by operations that only read shared state so
// they can run concurrently. Note: yamc::shared_lock takes the mutex address.
#define SHARED_LOCK(mtx) \
yamc::shared_lock<yamc::alternate::shared_mutex> lock(&mtx);

59
60
61
// Number of distinct C_API_PREDICT_* prediction types; presumably sizes the
// Booster's per-type cache of single-row predictors — TODO confirm against
// the single_row_predictor_ member declaration.
const int PREDICTOR_TYPES = 4;

// Single row predictor to abstract away caching logic
62
class SingleRowPredictorInner {
63
64
65
66
 public:
  PredictFunction predict_function;
  int64_t num_pred_in_one_row;

67
  SingleRowPredictorInner(int predict_type, Boosting* boosting, const Config& config, int start_iter, int num_iter) {
68
69
70
71
72
73
74
75
76
77
78
79
80
    bool is_predict_leaf = false;
    bool is_raw_score = false;
    bool predict_contrib = false;
    if (predict_type == C_API_PREDICT_LEAF_INDEX) {
      is_predict_leaf = true;
    } else if (predict_type == C_API_PREDICT_RAW_SCORE) {
      is_raw_score = true;
    } else if (predict_type == C_API_PREDICT_CONTRIB) {
      predict_contrib = true;
    }
    early_stop_ = config.pred_early_stop;
    early_stop_freq_ = config.pred_early_stop_freq;
    early_stop_margin_ = config.pred_early_stop_margin;
81
82
    iter_ = num_iter;
    predictor_.reset(new Predictor(boosting, start_iter, iter_, is_raw_score, is_predict_leaf, predict_contrib,
83
                                   early_stop_, early_stop_freq_, early_stop_margin_));
84
    num_pred_in_one_row = boosting->NumPredictOneRow(start_iter, iter_, is_predict_leaf, predict_contrib);
85
    predict_function = predictor_->GetPredictFunction();
Guolin Ke's avatar
Guolin Ke committed
86
    num_total_model_ = boosting->NumberOfTotalModel();
87
  }
88

89
  ~SingleRowPredictorInner() {}
90

Guolin Ke's avatar
Guolin Ke committed
91
  bool IsPredictorEqual(const Config& config, int iter, Boosting* boosting) {
92
93
94
95
96
    return early_stop_ == config.pred_early_stop &&
      early_stop_freq_ == config.pred_early_stop_freq &&
      early_stop_margin_ == config.pred_early_stop_margin &&
      iter_ == iter &&
      num_total_model_ == boosting->NumberOfTotalModel();
97
  }
Guolin Ke's avatar
Guolin Ke committed
98

99
100
101
102
103
104
105
106
107
 private:
  std::unique_ptr<Predictor> predictor_;
  bool early_stop_;
  int early_stop_freq_;
  double early_stop_margin_;
  int iter_;
  int num_total_model_;
};

108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
/*!
 * \brief Object to store resources meant for single-row Fast Predict methods.
 *
 * For legacy reasons this is called `FastConfig` in the public C API.
 *
 * Meant to be used by the *Fast* predict methods only.
 * It stores the configuration and prediction resources for reuse across predictions.
 */
struct SingleRowPredictor {
 public:
  // Parses `parameters` into `config` (member init order: config must be
  // built before single_row_predictor_inner, which consumes it), then fails
  // fast when `num_cols` disagrees with the training feature count unless
  // predict_disable_shape_check is set.
  SingleRowPredictor(yamc::alternate::shared_mutex *booster_mutex,
             const char *parameters,
             const int data_type,
             const int32_t num_cols,
             int predict_type,
             Boosting *boosting,
             int start_iter,
             int num_iter) : config(Config::Str2Map(parameters)), data_type(data_type), num_cols(num_cols), single_row_predictor_inner(predict_type, boosting, config, start_iter, num_iter), booster_mutex(booster_mutex) {
    if (!config.predict_disable_shape_check && num_cols != boosting->MaxFeatureIdx() + 1) {
      Log::Fatal("The number of features in data (%d) is not the same as it was in training data (%d).\n"\
                 "You can set ``predict_disable_shape_check=true`` to discard this error, but please be aware what you are doing.", num_cols, boosting->MaxFeatureIdx() + 1);
    }
  }

  // Predicts one row (get_row_fun is invoked with row index 0) and writes
  // num_pred_in_one_row values to out_result.
  // Lock order: this predictor's own mutex exclusively first, then the
  // booster's mutex shared — so the booster cannot be modified mid-predict
  // while other readers (other predictors) may still proceed.
  void Predict(std::function<std::vector<std::pair<int, double>>(int row_idx)> get_row_fun,
               double* out_result, int64_t* out_len) const {
    UNIQUE_LOCK(single_row_predictor_mutex)
    yamc::shared_lock<yamc::alternate::shared_mutex> booster_shared_lock(booster_mutex);

    auto one_row = get_row_fun(0);
    single_row_predictor_inner.predict_function(one_row, out_result);

    *out_len = single_row_predictor_inner.num_pred_in_one_row;
  }

 public:
  Config config;
  const int data_type;
  const int32_t num_cols;

 private:
  SingleRowPredictorInner single_row_predictor_inner;

  // Prevent the booster from being modified while we have a predictor relying on it during prediction
  yamc::alternate::shared_mutex *booster_mutex;

  // If several threads try to predict at the same time using the same SingleRowPredictor
  // we want them to still provide correct values, so the mutex is necessary due to the shared
  // resources in the predictor.
  // However the recommended approach is to instantiate one SingleRowPredictor per thread,
  // to avoid contention here.
  mutable yamc::alternate::shared_mutex single_row_predictor_mutex;
};

Guolin Ke's avatar
Guolin Ke committed
162
class Booster {
Nikita Titov's avatar
Nikita Titov committed
163
 public:
Guolin Ke's avatar
Guolin Ke committed
164
  // Loads a Booster from a saved model file. "gbdt" is passed as the type
  // hint; presumably the concrete boosting sub-type is restored from the
  // file content by CreateBoosting — TODO confirm in boosting.h.
  explicit Booster(const char* filename) {
    boosting_.reset(Boosting::CreateBoosting("gbdt", filename));
  }

Guolin Ke's avatar
Guolin Ke committed
168
  // Creates a Booster for training on `train_data` using the given
  // parameter string. Order matters: config is parsed first, then the
  // boosting object is created, then objective/metrics, then Init().
  Booster(const Dataset* train_data,
          const char* parameters) {
    auto param = Config::Str2Map(parameters);
    config_.Set(param);
    OMP_SET_NUM_THREADS(config_.num_threads);
    // create boosting
    if (config_.input_model.size() > 0) {
      // input_model is ignored here; continued training through the C API
      // must go through an initial score instead.
      Log::Warning("Continued train from model is not supported for c_api,\n"
                   "please use continued train with input score");
    }

    boosting_.reset(Boosting::CreateBoosting(config_.boosting, nullptr));

    train_data_ = train_data;
    CreateObjectiveAndMetrics();
    // initialize the boosting
    if (config_.tree_learner == std::string("feature")) {
      Log::Fatal("Do not support feature parallel in c api");
    }
    // Distributed tree learners only make sense with more than one machine.
    if (Network::num_machines() == 1 && config_.tree_learner != std::string("serial")) {
      Log::Warning("Only find one worker, will switch to serial tree learner");
      config_.tree_learner = "serial";
    }
    boosting_->Init(&config_, train_data_, objective_fun_.get(),
                    Common::ConstPtrInVectorWrapper<Metric>(train_metric_));
  }

  // Merges the models of `other` into this booster, under an exclusive lock
  // (delegates the actual merge to Boosting::MergeFrom).
  void MergeFrom(const Booster* other) {
    UNIQUE_LOCK(mutex_)
    boosting_->MergeFrom(other->boosting_.get());
  }

  // Nothing to do explicitly: owned resources (boosting_, objective,
  // metrics) are released by their smart-pointer/container members.
  ~Booster() {
  }
202

203
  void CreateObjectiveAndMetrics() {
Guolin Ke's avatar
Guolin Ke committed
204
    // create objective function
Guolin Ke's avatar
Guolin Ke committed
205
206
    objective_fun_.reset(ObjectiveFunction::CreateObjectiveFunction(config_.objective,
                                                                    config_));
Guolin Ke's avatar
Guolin Ke committed
207
    if (objective_fun_ == nullptr) {
208
      Log::Info("Using self-defined objective function");
Guolin Ke's avatar
Guolin Ke committed
209
210
211
212
213
214
215
216
    }
    // initialize the objective function
    if (objective_fun_ != nullptr) {
      objective_fun_->Init(train_data_->metadata(), train_data_->num_data());
    }

    // create training metric
    train_metric_.clear();
Guolin Ke's avatar
Guolin Ke committed
217
    for (auto metric_type : config_.metric) {
Guolin Ke's avatar
Guolin Ke committed
218
      auto metric = std::unique_ptr<Metric>(
Guolin Ke's avatar
Guolin Ke committed
219
        Metric::CreateMetric(metric_type, config_));
Guolin Ke's avatar
Guolin Ke committed
220
221
222
223
224
      if (metric == nullptr) { continue; }
      metric->Init(train_data_->metadata(), train_data_->num_data());
      train_metric_.push_back(std::move(metric));
    }
    train_metric_.shrink_to_fit();
225
226
227
228
  }

  void ResetTrainingData(const Dataset* train_data) {
    if (train_data != train_data_) {
229
      UNIQUE_LOCK(mutex_)
230
231
232
233
234
235
      train_data_ = train_data;
      CreateObjectiveAndMetrics();
      // reset the boosting
      boosting_->ResetTrainingData(train_data_,
                                   objective_fun_.get(), Common::ConstPtrInVectorWrapper<Metric>(train_metric_));
    }
wxchan's avatar
wxchan committed
236
237
  }

238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
  // Validates that `new_param` does not attempt to change any parameter that
  // is baked into an already-constructed Dataset (binning, sampling,
  // missing-value handling, column roles, ...). Each violation aborts via
  // Log::Fatal. Only parameters actually present in `new_param` are checked.
  static void CheckDatasetResetConfig(
      const Config& old_config,
      const std::unordered_map<std::string, std::string>& new_param) {
    Config new_config;
    new_config.Set(new_param);
    if (new_param.count("data_random_seed") &&
        new_config.data_random_seed != old_config.data_random_seed) {
      Log::Fatal("Cannot change data_random_seed after constructed Dataset handle.");
    }
    if (new_param.count("max_bin") &&
        new_config.max_bin != old_config.max_bin) {
      Log::Fatal("Cannot change max_bin after constructed Dataset handle.");
    }
    if (new_param.count("max_bin_by_feature") &&
        new_config.max_bin_by_feature != old_config.max_bin_by_feature) {
      Log::Fatal(
          "Cannot change max_bin_by_feature after constructed Dataset handle.");
    }
    if (new_param.count("bin_construct_sample_cnt") &&
        new_config.bin_construct_sample_cnt !=
            old_config.bin_construct_sample_cnt) {
      Log::Fatal(
          "Cannot change bin_construct_sample_cnt after constructed Dataset "
          "handle.");
    }
    if (new_param.count("min_data_in_bin") &&
        new_config.min_data_in_bin != old_config.min_data_in_bin) {
      Log::Fatal(
          "Cannot change min_data_in_bin after constructed Dataset handle.");
    }
    if (new_param.count("use_missing") &&
        new_config.use_missing != old_config.use_missing) {
      Log::Fatal("Cannot change use_missing after constructed Dataset handle.");
    }
    if (new_param.count("zero_as_missing") &&
        new_config.zero_as_missing != old_config.zero_as_missing) {
      Log::Fatal(
          "Cannot change zero_as_missing after constructed Dataset handle.");
    }
    if (new_param.count("categorical_feature") &&
        new_config.categorical_feature != old_config.categorical_feature) {
      Log::Fatal(
          "Cannot change categorical_feature after constructed Dataset "
          "handle.");
    }
    if (new_param.count("feature_pre_filter") &&
        new_config.feature_pre_filter != old_config.feature_pre_filter) {
      Log::Fatal(
          "Cannot change feature_pre_filter after constructed Dataset handle.");
    }
    if (new_param.count("is_enable_sparse") &&
        new_config.is_enable_sparse != old_config.is_enable_sparse) {
      Log::Fatal(
          "Cannot change is_enable_sparse after constructed Dataset handle.");
    }
    if (new_param.count("pre_partition") &&
        new_config.pre_partition != old_config.pre_partition) {
      Log::Fatal(
          "Cannot change pre_partition after constructed Dataset handle.");
    }
    if (new_param.count("enable_bundle") &&
        new_config.enable_bundle != old_config.enable_bundle) {
      Log::Fatal(
          "Cannot change enable_bundle after constructed Dataset handle.");
    }
    if (new_param.count("header") && new_config.header != old_config.header) {
      Log::Fatal("Cannot change header after constructed Dataset handle.");
    }
    if (new_param.count("two_round") &&
        new_config.two_round != old_config.two_round) {
      Log::Fatal("Cannot change two_round after constructed Dataset handle.");
    }
    if (new_param.count("label_column") &&
        new_config.label_column != old_config.label_column) {
      Log::Fatal(
          "Cannot change label_column after constructed Dataset handle.");
    }
    if (new_param.count("weight_column") &&
        new_config.weight_column != old_config.weight_column) {
      Log::Fatal(
          "Cannot change weight_column after constructed Dataset handle.");
    }
    if (new_param.count("group_column") &&
        new_config.group_column != old_config.group_column) {
      Log::Fatal(
          "Cannot change group_column after constructed Dataset handle.");
    }
    if (new_param.count("ignore_column") &&
        new_config.ignore_column != old_config.ignore_column) {
      Log::Fatal(
          "Cannot change ignore_column after constructed Dataset handle.");
    }
    // forcedbins_filename is rejected even when the value is unchanged.
    if (new_param.count("forcedbins_filename")) {
      Log::Fatal("Cannot change forced bins after constructed Dataset handle.");
    }
    // Special case: min_data_in_leaf may grow, but shrinking it is unsafe
    // when feature_pre_filter already dropped features under the old value.
    if (new_param.count("min_data_in_leaf") &&
        new_config.min_data_in_leaf < old_config.min_data_in_leaf &&
        old_config.feature_pre_filter) {
      Log::Fatal(
          "Reducing `min_data_in_leaf` with `feature_pre_filter=true` may "
          "cause unexpected behaviour "
          "for features that were pre-filtered by the larger "
          "`min_data_in_leaf`.\n"
          "You need to set `feature_pre_filter=false` to dynamically change "
          "the `min_data_in_leaf`.");
    }
    if (new_param.count("linear_tree") && new_config.linear_tree != old_config.linear_tree) {
      Log::Fatal("Cannot change linear_tree after constructed Dataset handle.");
    }
    if (new_param.count("precise_float_parser") &&
        new_config.precise_float_parser != old_config.precise_float_parser) {
      Log::Fatal("Cannot change precise_float_parser after constructed Dataset handle.");
    }
  }

wxchan's avatar
wxchan committed
353
  // Applies a new parameter string to an existing Booster under an exclusive
  // lock. Parameters that cannot change mid-training (num_class, boosting,
  // metric, and everything baked into the Dataset) are rejected via
  // Log::Fatal before config_ is touched.
  void ResetConfig(const char* parameters) {
    UNIQUE_LOCK(mutex_)
    auto param = Config::Str2Map(parameters);
    Config new_config;
    new_config.Set(param);
    if (param.count("num_class") && new_config.num_class != config_.num_class) {
      Log::Fatal("Cannot change num_class during training");
    }
    if (param.count("boosting") && new_config.boosting != config_.boosting) {
      Log::Fatal("Cannot change boosting during training");
    }
    if (param.count("metric") && new_config.metric != config_.metric) {
      Log::Fatal("Cannot change metric during training");
    }
    // Also reject changes to Dataset-construction parameters.
    CheckDatasetResetConfig(config_, param);

    config_.Set(param);

    OMP_SET_NUM_THREADS(config_.num_threads);

    // A new objective requires rebuilding and re-initializing it, and
    // pushing the change into the boosting object's training state.
    if (param.count("objective")) {
      // create objective function
      objective_fun_.reset(ObjectiveFunction::CreateObjectiveFunction(config_.objective,
                                                                      config_));
      if (objective_fun_ == nullptr) {
        Log::Info("Using self-defined objective function");
      }
      // initialize the objective function
      if (objective_fun_ != nullptr) {
        objective_fun_->Init(train_data_->metadata(), train_data_->num_data());
      }
      boosting_->ResetTrainingData(train_data_,
                                   objective_fun_.get(), Common::ConstPtrInVectorWrapper<Metric>(train_metric_));
    }

    boosting_->ResetConfig(&config_);
  }

  void AddValidData(const Dataset* valid_data) {
392
    UNIQUE_LOCK(mutex_)
wxchan's avatar
wxchan committed
393
    valid_metrics_.emplace_back();
Guolin Ke's avatar
Guolin Ke committed
394
395
    for (auto metric_type : config_.metric) {
      auto metric = std::unique_ptr<Metric>(Metric::CreateMetric(metric_type, config_));
wxchan's avatar
wxchan committed
396
397
398
399
400
401
      if (metric == nullptr) { continue; }
      metric->Init(valid_data->metadata(), valid_data->num_data());
      valid_metrics_.back().push_back(std::move(metric));
    }
    valid_metrics_.back().shrink_to_fit();
    boosting_->AddValidDataset(valid_data,
402
                               Common::ConstPtrInVectorWrapper<Metric>(valid_metrics_.back()));
wxchan's avatar
wxchan committed
403
  }
Guolin Ke's avatar
Guolin Ke committed
404

405
  // Runs one boosting iteration; gradients/hessians are computed internally
  // from the configured objective (hence the nullptr arguments).
  // NOTE(review): the return value is forwarded from Boosting::TrainOneIter —
  // presumably true means training cannot continue; confirm in boosting.h.
  bool TrainOneIter() {
    UNIQUE_LOCK(mutex_)
    return boosting_->TrainOneIter(nullptr, nullptr);
  }

Guolin Ke's avatar
Guolin Ke committed
410
  void Refit(const int32_t* leaf_preds, int32_t nrow, int32_t ncol) {
411
    UNIQUE_LOCK(mutex_)
Guolin Ke's avatar
Guolin Ke committed
412
413
414
    std::vector<std::vector<int32_t>> v_leaf_preds(nrow, std::vector<int32_t>(ncol, 0));
    for (int i = 0; i < nrow; ++i) {
      for (int j = 0; j < ncol; ++j) {
415
        v_leaf_preds[i][j] = leaf_preds[static_cast<size_t>(i) * static_cast<size_t>(ncol) + static_cast<size_t>(j)];
Guolin Ke's avatar
Guolin Ke committed
416
417
418
419
420
      }
    }
    boosting_->RefitTree(v_leaf_preds);
  }

421
  // Runs one boosting iteration with caller-supplied gradients and hessians
  // (the custom-objective path used by LGBM_BoosterUpdateOneIterCustom).
  bool TrainOneIter(const score_t* gradients, const score_t* hessians) {
    UNIQUE_LOCK(mutex_)
    return boosting_->TrainOneIter(gradients, hessians);
  }

wxchan's avatar
wxchan committed
426
  // Undoes the most recent boosting iteration, under an exclusive lock.
  void RollbackOneIter() {
    UNIQUE_LOCK(mutex_)
    boosting_->RollbackOneIter();
  }

431
  void SetSingleRowPredictorInner(int start_iteration, int num_iteration, int predict_type, const Config& config) {
432
433
434
      UNIQUE_LOCK(mutex_)
      if (single_row_predictor_[predict_type].get() == nullptr ||
          !single_row_predictor_[predict_type]->IsPredictorEqual(config, num_iteration, boosting_.get())) {
435
        single_row_predictor_[predict_type].reset(new SingleRowPredictorInner(predict_type, boosting_.get(),
436
                                                                         config, start_iteration, num_iteration));
437
438
439
      }
  }

440
441
442
443
444
445
446
447
448
449
450
  // Creates a standalone SingleRowPredictor for the *Fast* predict path,
  // bound to this booster's mutex so the booster cannot change mid-predict.
  std::unique_ptr<SingleRowPredictor> InitSingleRowPredictor(int predict_type, int start_iteration, int num_iteration, int data_type, int32_t num_cols, const char *parameters) {
    // Workaround https://github.com/microsoft/LightGBM/issues/6142 by locking here
    // This is only a workaround because if predictors are initialized differently it may still behave incorrectly,
    // and because multiple racing Predictor initializations through LGBM_BoosterPredictForMat suffers from that same issue of Predictor init writing things in the booster.
    // Once #6142 is fixed (predictor doesn't write in the Booster as should have been the case since 1c35c3b9ede9adab8ccc5fd7b4b2b6af188a79f0), this line can be removed.
    UNIQUE_LOCK(mutex_)

    return std::unique_ptr<SingleRowPredictor>(new SingleRowPredictor(
      &mutex_, parameters, data_type, num_cols, predict_type, boosting_.get(), start_iteration, num_iteration));
  }

451
  // Predicts a single row (get_row_fun is invoked with row index 0) using
  // the cached per-type predictor. Requires a prior call to
  // SetSingleRowPredictorInner(predict_type, ...) to have populated
  // single_row_predictor_[predict_type].
  void PredictSingleRow(int predict_type, int ncol,
               std::function<std::vector<std::pair<int, double>>(int row_idx)> get_row_fun,
               const Config& config,
               double* out_result, int64_t* out_len) const {
    if (!config.predict_disable_shape_check && ncol != boosting_->MaxFeatureIdx() + 1) {
      Log::Fatal("The number of features in data (%d) is not the same as it was in training data (%d).\n"\
                 "You can set ``predict_disable_shape_check=true`` to discard this error, but please be aware what you are doing.", ncol, boosting_->MaxFeatureIdx() + 1);
    }
    // Exclusive lock: the cached predictor holds shared state that cannot
    // be used by two predictions at once.
    UNIQUE_LOCK(mutex_)
    const auto& single_row_predictor = single_row_predictor_[predict_type];
    auto one_row = get_row_fun(0);
    auto pred_wrt_ptr = out_result;
    single_row_predictor->predict_function(one_row, pred_wrt_ptr);

    *out_len = single_row_predictor->num_pred_in_one_row;
  }

468
  // Builds a fresh Predictor for the given predict type and iteration range.
  // Fails fast when `ncol` does not match the training feature count, unless
  // predict_disable_shape_check is set.
  Predictor CreatePredictor(int start_iteration, int num_iteration, int predict_type, int ncol, const Config& config) const {
    if (!config.predict_disable_shape_check && ncol != boosting_->MaxFeatureIdx() + 1) {
      Log::Fatal("The number of features in data (%d) is not the same as it was in training data (%d).\n" \
                 "You can set ``predict_disable_shape_check=true`` to discard this error, but please be aware what you are doing.", ncol, boosting_->MaxFeatureIdx() + 1);
    }
    // Map the C API predict type onto the Predictor's flags; the predict-type
    // constants are mutually exclusive, and all-false means normal prediction.
    const bool is_predict_leaf = (predict_type == C_API_PREDICT_LEAF_INDEX);
    const bool is_raw_score = (predict_type == C_API_PREDICT_RAW_SCORE);
    const bool predict_contrib = (predict_type == C_API_PREDICT_CONTRIB);

    return Predictor(boosting_.get(), start_iteration, num_iteration, is_raw_score, is_predict_leaf, predict_contrib,
                     config.pred_early_stop, config.pred_early_stop_freq, config.pred_early_stop_margin);
  }

490
  // Predicts `nrow` rows in parallel with a Predictor created on the fly,
  // writing num_pred_in_one_row values per row into out_result. Holds the
  // booster lock shared, so concurrent predictions may proceed while
  // mutating operations are excluded.
  void Predict(int start_iteration, int num_iteration, int predict_type, int nrow, int ncol,
               std::function<std::vector<std::pair<int, double>>(int row_idx)> get_row_fun,
               const Config& config,
               double* out_result, int64_t* out_len) const {
    SHARED_LOCK(mutex_);
    auto predictor = CreatePredictor(start_iteration, num_iteration, predict_type, ncol, config);
    bool is_predict_leaf = false;
    bool predict_contrib = false;
    if (predict_type == C_API_PREDICT_LEAF_INDEX) {
      is_predict_leaf = true;
    } else if (predict_type == C_API_PREDICT_CONTRIB) {
      predict_contrib = true;
    }
    // Per-row output width — determines each row's slice of out_result.
    int64_t num_pred_in_one_row = boosting_->NumPredictOneRow(start_iteration, num_iteration, is_predict_leaf, predict_contrib);
    auto pred_fun = predictor.GetPredictFunction();
    // Rows are independent; exceptions inside the parallel region are
    // captured by the OMP_*_EX macros and rethrown after the loop.
    OMP_INIT_EX();
    #pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(static)
    for (int i = 0; i < nrow; ++i) {
      OMP_LOOP_EX_BEGIN();
      auto one_row = get_row_fun(i);
      auto pred_wrt_ptr = out_result + static_cast<size_t>(num_pred_in_one_row) * i;
      pred_fun(one_row, pred_wrt_ptr);
      OMP_LOOP_EX_END();
    }
    OMP_THROW_EX();
    *out_len = num_pred_in_one_row * nrow;
  }

518
  void PredictSparse(int start_iteration, int num_iteration, int predict_type, int64_t nrow, int ncol,
519
520
521
522
                     std::function<std::vector<std::pair<int, double>>(int64_t row_idx)> get_row_fun,
                     const Config& config, int64_t* out_elements_size,
                     std::vector<std::vector<std::unordered_map<int, double>>>* agg_ptr,
                     int32_t** out_indices, void** out_data, int data_type,
523
                     bool* is_data_float32_ptr, int num_matrices) const {
524
    auto predictor = CreatePredictor(start_iteration, num_iteration, predict_type, ncol, config);
525
526
527
    auto pred_sparse_fun = predictor.GetPredictSparseFunction();
    std::vector<std::vector<std::unordered_map<int, double>>>& agg = *agg_ptr;
    OMP_INIT_EX();
528
    #pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(static)
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
    for (int64_t i = 0; i < nrow; ++i) {
      OMP_LOOP_EX_BEGIN();
      auto one_row = get_row_fun(i);
      agg[i] = std::vector<std::unordered_map<int, double>>(num_matrices);
      pred_sparse_fun(one_row, &agg[i]);
      OMP_LOOP_EX_END();
    }
    OMP_THROW_EX();
    // calculate the nonzero data and indices size
    int64_t elements_size = 0;
    for (int64_t i = 0; i < static_cast<int64_t>(agg.size()); ++i) {
      auto row_vector = agg[i];
      for (int j = 0; j < static_cast<int>(row_vector.size()); ++j) {
        elements_size += static_cast<int64_t>(row_vector[j].size());
      }
    }
    *out_elements_size = elements_size;
    *is_data_float32_ptr = false;
    // allocate data and indices arrays
    if (data_type == C_API_DTYPE_FLOAT32) {
      *out_data = new float[elements_size];
      *is_data_float32_ptr = true;
    } else if (data_type == C_API_DTYPE_FLOAT64) {
      *out_data = new double[elements_size];
    } else {
      Log::Fatal("Unknown data type in PredictSparse");
      return;
    }
    *out_indices = new int32_t[elements_size];
  }

560
  void PredictSparseCSR(int start_iteration, int num_iteration, int predict_type, int64_t nrow, int ncol,
561
562
563
                        std::function<std::vector<std::pair<int, double>>(int64_t row_idx)> get_row_fun,
                        const Config& config,
                        int64_t* out_len, void** out_indptr, int indptr_type,
564
565
                        int32_t** out_indices, void** out_data, int data_type) const {
    SHARED_LOCK(mutex_);
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
    // Get the number of trees per iteration (for multiclass scenario we output multiple sparse matrices)
    int num_matrices = boosting_->NumModelPerIteration();
    bool is_indptr_int32 = false;
    bool is_data_float32 = false;
    int64_t indptr_size = (nrow + 1) * num_matrices;
    if (indptr_type == C_API_DTYPE_INT32) {
      *out_indptr = new int32_t[indptr_size];
      is_indptr_int32 = true;
    } else if (indptr_type == C_API_DTYPE_INT64) {
      *out_indptr = new int64_t[indptr_size];
    } else {
      Log::Fatal("Unknown indptr type in PredictSparseCSR");
      return;
    }
    // aggregated per row feature contribution results
    std::vector<std::vector<std::unordered_map<int, double>>> agg(nrow);
    int64_t elements_size = 0;
583
    PredictSparse(start_iteration, num_iteration, predict_type, nrow, ncol, get_row_fun, config, &elements_size, &agg,
584
585
586
                  out_indices, out_data, data_type, &is_data_float32, num_matrices);
    std::vector<int> row_sizes(num_matrices * nrow);
    std::vector<int64_t> row_matrix_offsets(num_matrices * nrow);
587
    std::vector<int64_t> matrix_offsets(num_matrices);
588
589
590
591
592
593
594
595
596
597
598
599
600
601
    int64_t row_vector_cnt = 0;
    for (int m = 0; m < num_matrices; ++m) {
      for (int64_t i = 0; i < static_cast<int64_t>(agg.size()); ++i) {
        auto row_vector = agg[i];
        auto row_vector_size = row_vector[m].size();
        // keep track of the row_vector sizes for parallelization
        row_sizes[row_vector_cnt] = static_cast<int>(row_vector_size);
        if (i == 0) {
          row_matrix_offsets[row_vector_cnt] = 0;
        } else {
          row_matrix_offsets[row_vector_cnt] = static_cast<int64_t>(row_sizes[row_vector_cnt - 1] + row_matrix_offsets[row_vector_cnt - 1]);
        }
        row_vector_cnt++;
      }
602
603
604
605
606
607
      if (m == 0) {
        matrix_offsets[m] = 0;
      }
      if (m + 1 < num_matrices) {
        matrix_offsets[m + 1] = static_cast<int64_t>(matrix_offsets[m] + row_matrix_offsets[row_vector_cnt - 1] + row_sizes[row_vector_cnt - 1]);
      }
608
609
610
611
612
613
614
615
616
617
618
619
    }
    // copy vector results to output for each row
    int64_t indptr_index = 0;
    for (int m = 0; m < num_matrices; ++m) {
      if (is_indptr_int32) {
        (reinterpret_cast<int32_t*>(*out_indptr))[indptr_index] = 0;
      } else {
        (reinterpret_cast<int64_t*>(*out_indptr))[indptr_index] = 0;
      }
      indptr_index++;
      int64_t matrix_start_index = m * static_cast<int64_t>(agg.size());
      OMP_INIT_EX();
620
      #pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(static)
621
622
623
624
      for (int64_t i = 0; i < static_cast<int64_t>(agg.size()); ++i) {
        OMP_LOOP_EX_BEGIN();
        auto row_vector = agg[i];
        int64_t row_start_index = matrix_start_index + i;
625
        int64_t element_index = row_matrix_offsets[row_start_index] + matrix_offsets[m];
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
        int64_t indptr_loop_index = indptr_index + i;
        for (auto it = row_vector[m].begin(); it != row_vector[m].end(); ++it) {
          (*out_indices)[element_index] = it->first;
          if (is_data_float32) {
            (reinterpret_cast<float*>(*out_data))[element_index] = static_cast<float>(it->second);
          } else {
            (reinterpret_cast<double*>(*out_data))[element_index] = it->second;
          }
          element_index++;
        }
        int64_t indptr_value = row_matrix_offsets[row_start_index] + row_sizes[row_start_index];
        if (is_indptr_int32) {
          (reinterpret_cast<int32_t*>(*out_indptr))[indptr_loop_index] = static_cast<int32_t>(indptr_value);
        } else {
          (reinterpret_cast<int64_t*>(*out_indptr))[indptr_loop_index] = indptr_value;
        }
        OMP_LOOP_EX_END();
      }
      OMP_THROW_EX();
      indptr_index += static_cast<int64_t>(agg.size());
    }
    out_len[0] = elements_size;
    out_len[1] = indptr_size;
  }

651
  // Run prediction and emit the result as CSC (compressed sparse column) buffers.
  // Allocates *out_col_ptr here; *out_indices / *out_data are filled via PredictSparse
  // (presumably allocated inside it — confirm against that helper).
  // out_len[0] receives the element count, out_len[1] the col_ptr length.
  void PredictSparseCSC(int start_iteration, int num_iteration, int predict_type, int64_t nrow, int ncol,
                        std::function<std::vector<std::pair<int, double>>(int64_t row_idx)> get_row_fun,
                        const Config& config,
                        int64_t* out_len, void** out_col_ptr, int col_ptr_type,
                        int32_t** out_indices, void** out_data, int data_type) const {
    SHARED_LOCK(mutex_);
    // Get the number of trees per iteration (for multiclass scenario we output multiple sparse matrices)
    int num_matrices = boosting_->NumModelPerIteration();
    auto predictor = CreatePredictor(start_iteration, num_iteration, predict_type, ncol, config);
    auto pred_sparse_fun = predictor.GetPredictSparseFunction();
    bool is_col_ptr_int32 = false;
    bool is_data_float32 = false;
    // NOTE(review): the extra output column (ncol + 1) appears to be for a per-row
    // constant/bias term in contribution output — confirm against PredictSparse.
    int num_output_cols = ncol + 1;
    int col_ptr_size = (num_output_cols + 1) * num_matrices;
    if (col_ptr_type == C_API_DTYPE_INT32) {
      *out_col_ptr = new int32_t[col_ptr_size];
      is_col_ptr_int32 = true;
    } else if (col_ptr_type == C_API_DTYPE_INT64) {
      *out_col_ptr = new int64_t[col_ptr_size];
    } else {
      Log::Fatal("Unknown col_ptr type in PredictSparseCSC");
      return;
    }
    // aggregated per row feature contribution results
    std::vector<std::vector<std::unordered_map<int, double>>> agg(nrow);
    int64_t elements_size = 0;
    PredictSparse(start_iteration, num_iteration, predict_type, nrow, ncol, get_row_fun, config, &elements_size, &agg,
                  out_indices, out_data, data_type, &is_data_float32, num_matrices);
    // calculate number of elements per column to construct
    // the CSC matrix with random access
    std::vector<std::vector<int64_t>> column_sizes(num_matrices);
    for (int m = 0; m < num_matrices; ++m) {
      column_sizes[m] = std::vector<int64_t>(num_output_cols, 0);
      for (int64_t i = 0; i < static_cast<int64_t>(agg.size()); ++i) {
        auto row_vector = agg[i];
        for (auto it = row_vector[m].begin(); it != row_vector[m].end(); ++it) {
          column_sizes[m][it->first] += 1;
        }
      }
    }
    // keep track of column counts
    std::vector<std::vector<int64_t>> column_counts(num_matrices);
    // keep track of beginning index for each column
    std::vector<std::vector<int64_t>> column_start_indices(num_matrices);
    // keep track of beginning index for each matrix
    std::vector<int64_t> matrix_start_indices(num_matrices, 0);
    int col_ptr_index = 0;
    for (int m = 0; m < num_matrices; ++m) {
      int64_t col_ptr_value = 0;
      column_start_indices[m] = std::vector<int64_t>(num_output_cols, 0);
      column_counts[m] = std::vector<int64_t>(num_output_cols, 0);
      // each matrix's col_ptr segment starts at 0
      if (is_col_ptr_int32) {
        (reinterpret_cast<int32_t*>(*out_col_ptr))[col_ptr_index] = static_cast<int32_t>(col_ptr_value);
      } else {
        (reinterpret_cast<int64_t*>(*out_col_ptr))[col_ptr_index] = col_ptr_value;
      }
      col_ptr_index++;
      // prefix-sum over column sizes yields each column's start offset;
      // the running totals are written straight into the output col_ptr array
      for (int64_t i = 1; i < static_cast<int64_t>(column_sizes[m].size()); ++i) {
        column_start_indices[m][i] = column_sizes[m][i - 1] + column_start_indices[m][i - 1];
        if (is_col_ptr_int32) {
          (reinterpret_cast<int32_t*>(*out_col_ptr))[col_ptr_index] = static_cast<int32_t>(column_start_indices[m][i]);
        } else {
          (reinterpret_cast<int64_t*>(*out_col_ptr))[col_ptr_index] = column_start_indices[m][i];
        }
        col_ptr_index++;
      }
      int64_t last_elem_index = static_cast<int64_t>(column_sizes[m].size()) - 1;
      int64_t last_column_start_index = column_start_indices[m][last_elem_index];
      int64_t last_column_size = column_sizes[m][last_elem_index];
      // final col_ptr entry of this matrix = total element count of the matrix
      if (is_col_ptr_int32) {
        (reinterpret_cast<int32_t*>(*out_col_ptr))[col_ptr_index] = static_cast<int32_t>(last_column_start_index + last_column_size);
      } else {
        (reinterpret_cast<int64_t*>(*out_col_ptr))[col_ptr_index] = last_column_start_index + last_column_size;
      }
      if (m + 1 < num_matrices) {
        matrix_start_indices[m + 1] = matrix_start_indices[m] + last_column_start_index + last_column_size;
      }
      col_ptr_index++;
    }
    // Note: we parallelize across matrices instead of rows because of the column_counts[m][col_idx] increment inside the loop
    OMP_INIT_EX();
    #pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(static)
    for (int m = 0; m < num_matrices; ++m) {
      OMP_LOOP_EX_BEGIN();
      for (int64_t i = 0; i < static_cast<int64_t>(agg.size()); ++i) {
        auto row_vector = agg[i];
        for (auto it = row_vector[m].begin(); it != row_vector[m].end(); ++it) {
          int64_t col_idx = it->first;
          // next free slot within this column of this matrix
          int64_t element_index = column_start_indices[m][col_idx] +
            matrix_start_indices[m] +
            column_counts[m][col_idx];
          // store the row index
          (*out_indices)[element_index] = static_cast<int32_t>(i);
          // update column count
          column_counts[m][col_idx]++;
          if (is_data_float32) {
            (reinterpret_cast<float*>(*out_data))[element_index] = static_cast<float>(it->second);
          } else {
            (reinterpret_cast<double*>(*out_data))[element_index] = it->second;
          }
        }
      }
      OMP_LOOP_EX_END();
    }
    OMP_THROW_EX();
    out_len[0] = elements_size;
    out_len[1] = col_ptr_size;
  }

760
  void Predict(int start_iteration, int num_iteration, int predict_type, const char* data_filename,
Guolin Ke's avatar
Guolin Ke committed
761
               int data_has_header, const Config& config,
762
763
               const char* result_filename) const {
    SHARED_LOCK(mutex_)
Guolin Ke's avatar
Guolin Ke committed
764
765
    bool is_predict_leaf = false;
    bool is_raw_score = false;
Guolin Ke's avatar
Guolin Ke committed
766
    bool predict_contrib = false;
Guolin Ke's avatar
Guolin Ke committed
767
768
769
770
    if (predict_type == C_API_PREDICT_LEAF_INDEX) {
      is_predict_leaf = true;
    } else if (predict_type == C_API_PREDICT_RAW_SCORE) {
      is_raw_score = true;
771
    } else if (predict_type == C_API_PREDICT_CONTRIB) {
Guolin Ke's avatar
Guolin Ke committed
772
      predict_contrib = true;
Guolin Ke's avatar
Guolin Ke committed
773
774
775
    } else {
      is_raw_score = false;
    }
776
    Predictor predictor(boosting_.get(), start_iteration, num_iteration, is_raw_score, is_predict_leaf, predict_contrib,
777
                        config.pred_early_stop, config.pred_early_stop_freq, config.pred_early_stop_margin);
Guolin Ke's avatar
Guolin Ke committed
778
    bool bool_data_has_header = data_has_header > 0 ? true : false;
Chen Yufei's avatar
Chen Yufei committed
779
780
    predictor.Predict(data_filename, result_filename, bool_data_has_header, config.predict_disable_shape_check,
                      config.precise_float_parser);
Guolin Ke's avatar
Guolin Ke committed
781
782
  }

783
  // Copy cached predictions for dataset `data_idx` into out_result and report
  // the count through out_len.
  // NOTE(review): data_idx presumably selects training (0) vs. validation sets —
  // confirm against Boosting::GetPredictAt. Also note: unlike sibling getters,
  // this takes no SHARED_LOCK — confirm that is intentional.
  void GetPredictAt(int data_idx, double* out_result, int64_t* out_len) const {
    boosting_->GetPredictAt(data_idx, out_result, out_len);
  }

787
  // Serialize the model to a text file; iteration-range and importance-type
  // semantics are those of Boosting::SaveModelToFile.
  void SaveModelToFile(int start_iteration, int num_iteration, int feature_importance_type, const char* filename) const {
    boosting_->SaveModelToFile(start_iteration, num_iteration, feature_importance_type, filename);
  }
790

791
  void LoadModelFromString(const char* model_str) {
792
793
    size_t len = std::strlen(model_str);
    boosting_->LoadModelFromString(model_str, len);
794
795
  }

796
  // Serialize the model to its text representation and return it as a string.
  std::string SaveModelToString(int start_iteration, int num_iteration,
                                int feature_importance_type) const {
    return boosting_->SaveModelToString(start_iteration,
                                        num_iteration, feature_importance_type);
  }

802
  // Return a JSON-style dump of the model (format defined by Boosting::DumpModel).
  std::string DumpModel(int start_iteration, int num_iteration,
                        int feature_importance_type) const {
    return boosting_->DumpModel(start_iteration, num_iteration,
                                feature_importance_type);
  }
807

808
  // Per-feature importance values; `importance_type` selects split count vs. gain
  // (interpretation delegated to Boosting::FeatureImportance).
  std::vector<double> FeatureImportance(int num_iteration, int importance_type) const {
    return boosting_->FeatureImportance(num_iteration, importance_type);
  }

812
  // Upper bound of the model's output value (thread-safe read).
  double UpperBoundValue() const {
    SHARED_LOCK(mutex_)
    return boosting_->GetUpperBoundValue();
  }

  // Lower bound of the model's output value (thread-safe read).
  double LowerBoundValue() const {
    SHARED_LOCK(mutex_)
    return boosting_->GetLowerBoundValue();
  }

Guolin Ke's avatar
Guolin Ke committed
822
  // Read the output value of one leaf.
  // Downcast to GBDTBase because leaf access is not part of the Boosting interface.
  double GetLeafValue(int tree_idx, int leaf_idx) const {
    SHARED_LOCK(mutex_)
    return dynamic_cast<GBDTBase*>(boosting_.get())->GetLeafValue(tree_idx, leaf_idx);
  }

  // Overwrite the output value of one leaf (writer lock: mutates the model).
  void SetLeafValue(int tree_idx, int leaf_idx, double val) {
    UNIQUE_LOCK(mutex_)
    dynamic_cast<GBDTBase*>(boosting_.get())->SetLeafValue(tree_idx, leaf_idx, val);
  }

832
  // Randomly reorder the trees in [start_iter, end_iter) (writer lock: mutates the model).
  void ShuffleModels(int start_iter, int end_iter) {
    UNIQUE_LOCK(mutex_)
    boosting_->ShuffleModels(start_iter, end_iter);
  }

wxchan's avatar
wxchan committed
837
  // Total number of evaluation names across all training metrics
  // (a single metric can expose several names).
  int GetEvalCounts() const {
    SHARED_LOCK(mutex_)
    int ret = 0;
    for (const auto& metric : train_metric_) {
      ret += static_cast<int>(metric->GetName().size());
    }
    return ret;
  }
845

846
  int GetEvalNames(char** out_strs, const int len, const size_t buffer_len, size_t *out_buffer_len) const {
847
    SHARED_LOCK(mutex_)
848
    *out_buffer_len = 0;
wxchan's avatar
wxchan committed
849
850
851
    int idx = 0;
    for (const auto& metric : train_metric_) {
      for (const auto& name : metric->GetName()) {
852
853
854
855
856
        if (idx < len) {
          std::memcpy(out_strs[idx], name.c_str(), std::min(name.size() + 1, buffer_len));
          out_strs[idx][buffer_len - 1] = '\0';
        }
        *out_buffer_len = std::max(name.size() + 1, *out_buffer_len);
wxchan's avatar
wxchan committed
857
858
859
860
861
862
        ++idx;
      }
    }
    return idx;
  }

863
  int GetFeatureNames(char** out_strs, const int len, const size_t buffer_len, size_t *out_buffer_len) const {
864
    SHARED_LOCK(mutex_)
865
    *out_buffer_len = 0;
wxchan's avatar
wxchan committed
866
867
    int idx = 0;
    for (const auto& name : boosting_->FeatureNames()) {
868
869
870
871
872
      if (idx < len) {
        std::memcpy(out_strs[idx], name.c_str(), std::min(name.size() + 1, buffer_len));
        out_strs[idx][buffer_len - 1] = '\0';
      }
      *out_buffer_len = std::max(name.size() + 1, *out_buffer_len);
wxchan's avatar
wxchan committed
873
874
875
876
877
      ++idx;
    }
    return idx;
  }

wxchan's avatar
wxchan committed
878
  // Non-owning read access to the underlying boosting model.
  const Boosting* GetBoosting() const { return boosting_.get(); }
Guolin Ke's avatar
Guolin Ke committed
879

Nikita Titov's avatar
Nikita Titov committed
880
 private:
  /*! \brief Training data (raw pointer — presumably non-owning; lifetime managed by the caller) */
  const Dataset* train_data_;
  /*! \brief Underlying boosting model */
  std::unique_ptr<Boosting> boosting_;
  /*! \brief One cached single-row predictor slot per predictor type */
  std::unique_ptr<SingleRowPredictorInner> single_row_predictor_[PREDICTOR_TYPES];

  /*! \brief All configs */
  Config config_;
  /*! \brief Metric for training data */
  std::vector<std::unique_ptr<Metric>> train_metric_;
  /*! \brief Metrics for validation data */
  std::vector<std::vector<std::unique_ptr<Metric>>> valid_metrics_;
  /*! \brief Training objective function */
  std::unique_ptr<ObjectiveFunction> objective_fun_;
  /*! \brief mutex for threading safe call (shared lock for readers, unique for mutators) */
  mutable yamc::alternate::shared_mutex mutex_;
};

897
}  // namespace LightGBM
Guolin Ke's avatar
Guolin Ke committed
898

899
900
// explicitly declare symbols from LightGBM namespace
using LightGBM::AllgatherFunction;
901
using LightGBM::ArrowChunkedArray;
902
using LightGBM::ArrowTable;
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
using LightGBM::Booster;
using LightGBM::Common::CheckElementsIntervalClosed;
using LightGBM::Common::RemoveQuotationSymbol;
using LightGBM::Common::Vector2Ptr;
using LightGBM::Common::VectorSize;
using LightGBM::Config;
using LightGBM::data_size_t;
using LightGBM::Dataset;
using LightGBM::DatasetLoader;
using LightGBM::kZeroThreshold;
using LightGBM::LGBM_APIHandleException;
using LightGBM::Log;
using LightGBM::Network;
using LightGBM::Random;
using LightGBM::ReduceScatterFunction;
918
using LightGBM::SingleRowPredictor;
Guolin Ke's avatar
Guolin Ke committed
919

Guolin Ke's avatar
Guolin Ke committed
920
921
922
923
924
925
926
927
// some help functions used to convert data

std::function<std::vector<double>(int row_idx)>
RowFunctionFromDenseMatric(const void* data, int num_row, int num_col, int data_type, int is_row_major);

std::function<std::vector<std::pair<int, double>>(int row_idx)>
RowPairFunctionFromDenseMatric(const void* data, int num_row, int num_col, int data_type, int is_row_major);

928
929
930
std::function<std::vector<std::pair<int, double>>(int row_idx)>
RowPairFunctionFromDenseRows(const void** data, int num_col, int data_type);

931
932
template<typename T>
std::function<std::vector<std::pair<int, double>>(T idx)>
Guolin Ke's avatar
Guolin Ke committed
933
RowFunctionFromCSR(const void* indptr, int indptr_type, const int32_t* indices,
934
                   const void* data, int data_type, int64_t nindptr, int64_t nelem);
Guolin Ke's avatar
Guolin Ke committed
935
936
937

// Row iterator over one column of a CSC matrix
class CSC_RowIterator {
 public:
  CSC_RowIterator(const void* col_ptr, int col_ptr_type, const int32_t* indices,
                  const void* data, int data_type, int64_t ncol_ptr, int64_t nelem, int col_idx);
  ~CSC_RowIterator() {}
  // return value at idx, only can access by ascent order
  double Get(int idx);
  // return next non-zero pair, if index < 0, means no more data
  std::pair<int, double> NextNonZero();

 private:
  int nonzero_idx_ = 0;     // position of the next non-zero element within the column
  int cur_idx_ = -1;        // row index of the most recently fetched non-zero (-1 before/after)
  double cur_val_ = 0.0f;   // value of the most recently fetched non-zero
  bool is_end_ = false;     // set once the column is fully consumed
  std::function<std::pair<int, double>(int idx)> iter_fun_;  // type-erased access to the underlying typed arrays
};

// start of c_api functions

Guolin Ke's avatar
Guolin Ke committed
957
// Return the message recorded by the most recent LGBM_SetLastError call.
const char* LGBM_GetLastError() {
  return LastErrorMsg();
}

961
962
963
964
965
966
967
968
969
970
971
972
// Dump all parameter aliases as a string into the caller's buffer.
// *out_len always receives the required size (including the NUL);
// the copy only happens when the caller's buffer is large enough.
int LGBM_DumpParamAliases(int64_t buffer_len,
                          int64_t* out_len,
                          char* out_str) {
  API_BEGIN();
  const std::string aliases = Config::DumpAliases();
  const int64_t required = static_cast<int64_t>(aliases.size()) + 1;
  *out_len = required;
  if (required <= buffer_len) {
    std::memcpy(out_str, aliases.c_str(), required);
  }
  API_END();
}

973
974
975
976
977
978
// Redirect all LightGBM log output to the user-supplied callback.
int LGBM_RegisterLogCallback(void (*callback)(const char*)) {
  API_BEGIN();
  Log::ResetCallBack(callback);
  API_END();
}

979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
// Number of rows to sample for bin construction: the configured sample count,
// capped by the total number of rows available.
static inline int SampleCount(int32_t total_nrow, const Config& config) {
  const int32_t configured = config.bin_construct_sample_cnt;
  if (total_nrow < configured) {
    return static_cast<int>(total_nrow);
  }
  return static_cast<int>(configured);
}

// Draw a deterministic (seeded) random sample of row indices for bin construction.
static inline std::vector<int32_t> CreateSampleIndices(int32_t total_nrow, const Config& config) {
  const int sample_cnt = SampleCount(total_nrow, config);
  Random rand(config.data_random_seed);
  return rand.Sample(total_nrow, sample_cnt);
}

int LGBM_GetSampleCount(int32_t num_total_row,
                        const char* parameters,
                        int* out) {
  API_BEGIN();
  if (out == nullptr) {
    Log::Fatal("LGBM_GetSampleCount output is nullptr");
  }
  auto param = Config::Str2Map(parameters);
  Config config;
  config.Set(param);

  *out = SampleCount(num_total_row, config);
  API_END();
}

int LGBM_SampleIndices(int32_t num_total_row,
                       const char* parameters,
                       void* out,
                       int32_t* out_len) {
  // This API is to keep python binding's behavior the same with C++ implementation.
  // Sample count, random seed etc. should be provided in parameters.
  API_BEGIN();
  if (out == nullptr) {
    Log::Fatal("LGBM_SampleIndices output is nullptr");
  }
  auto param = Config::Str2Map(parameters);
  Config config;
  config.Set(param);

  auto sample_indices = CreateSampleIndices(num_total_row, config);
  memcpy(out, sample_indices.data(), sizeof(int32_t) * sample_indices.size());
  *out_len = static_cast<int32_t>(sample_indices.size());
  API_END();
}

1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
// Read the single byte at `index` from the buffer behind the handle.
int LGBM_ByteBufferGetAt(ByteBufferHandle handle, int32_t index, uint8_t* out_val) {
  API_BEGIN();
  LightGBM::ByteBuffer* byteBuffer = reinterpret_cast<LightGBM::ByteBuffer*>(handle);
  *out_val = byteBuffer->GetAt(index);
  API_END();
}

// Destroy a ByteBuffer previously handed out through this API.
int LGBM_ByteBufferFree(ByteBufferHandle handle) {
  API_BEGIN();
  delete reinterpret_cast<LightGBM::ByteBuffer*>(handle);
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
1037
// Create a Dataset by loading a data file from disk.
int LGBM_DatasetCreateFromFile(const char* filename,
                               const char* parameters,
                               const DatasetHandle reference,
                               DatasetHandle* out) {
  API_BEGIN();
  auto param = Config::Str2Map(parameters);
  Config config;
  config.Set(param);
  OMP_SET_NUM_THREADS(config.num_threads);
  DatasetLoader loader(config, nullptr, 1, filename);
  if (reference == nullptr) {
    // No reference dataset: bin mappers are built from this file itself.
    if (Network::num_machines() == 1) {
      *out = loader.LoadFromFile(filename);
    } else {
      // Distributed mode: each machine loads its own shard of the rows.
      *out = loader.LoadFromFile(filename, Network::rank(), Network::num_machines());
    }
  } else {
    // Align feature binning with the reference dataset (e.g. validation data
    // aligned with the training dataset's bins).
    *out = loader.LoadFromFileAlignWithOtherDataset(filename,
                                                    reinterpret_cast<const Dataset*>(reference));
  }
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
1060
// Create a Dataset whose bin mappers are constructed from pre-sampled
// column data (one sampled value array + index array per column).
int LGBM_DatasetCreateFromSampledColumn(double** sample_data,
                                        int** sample_indices,
                                        int32_t ncol,
                                        const int* num_per_col,
                                        int32_t num_sample_row,
                                        int32_t num_local_row,
                                        int64_t num_dist_row,
                                        const char* parameters,
                                        DatasetHandle* out) {
  API_BEGIN();
  auto param = Config::Str2Map(parameters);
  Config config;
  config.Set(param);
  OMP_SET_NUM_THREADS(config.num_threads);
  DatasetLoader loader(config, nullptr, 1, nullptr);
  *out = loader.ConstructFromSampleData(sample_data,
                                        sample_indices,
                                        ncol,
                                        num_per_col,
                                        num_sample_row,
                                        static_cast<data_size_t>(num_local_row),
                                        num_dist_row);
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
1085
// Create an empty Dataset of `num_total_row` rows whose feature binning is
// copied from an existing reference Dataset; rows are pushed in later.
int LGBM_DatasetCreateByReference(const DatasetHandle reference,
                                  int64_t num_total_row,
                                  DatasetHandle* out) {
  API_BEGIN();
  std::unique_ptr<Dataset> ret;
  data_size_t nrows = static_cast<data_size_t>(num_total_row);
  ret.reset(new Dataset(nrows));
  const Dataset* reference_dataset = reinterpret_cast<const Dataset*>(reference);
  ret->CreateValid(reference_dataset);
  ret->InitByReference(nrows, reference_dataset);
  *out = ret.release();
  API_END();
}

1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
// Create a Dataset from a serialized reference buffer (produced elsewhere by
// the serialization API), sized for `num_row` rows.
int LGBM_DatasetCreateFromSerializedReference(const void* ref_buffer,
                                              int32_t ref_buffer_size,
                                              int64_t num_row,
                                              int32_t num_classes,
                                              const char* parameters,
                                              DatasetHandle* out) {
  API_BEGIN();
  auto param = Config::Str2Map(parameters);
  Config config;
  config.Set(param);
  OMP_SET_NUM_THREADS(config.num_threads);
  DatasetLoader loader(config, nullptr, 1, nullptr);
  *out = loader.LoadFromSerializedReference(static_cast<const char*>(ref_buffer),
    static_cast<size_t>(ref_buffer_size),
    static_cast<data_size_t>(num_row),
    num_classes);
  API_END();
}

1118
1119
1120
1121
1122
// Prepare a Dataset for streaming row insertion (allocates metadata storage)
// and switch it to manual-finish mode so loading is only finalized by an
// explicit LGBM_DatasetMarkFinished call.
int LGBM_DatasetInitStreaming(DatasetHandle dataset,
                              int32_t has_weights,
                              int32_t has_init_scores,
                              int32_t has_queries,
                              int32_t nclasses,
                              int32_t nthreads,
                              int32_t omp_max_threads) {
  API_BEGIN();
  auto p_dataset = reinterpret_cast<Dataset*>(dataset);
  auto num_data = p_dataset->num_data();
  p_dataset->InitStreaming(num_data, has_weights, has_init_scores, has_queries, nclasses, nthreads, omp_max_threads);
  p_dataset->set_wait_for_manual_finish(true);
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
1133
// Push a dense block of rows (row-major) into a pre-sized Dataset, starting at
// row `start_row`. When the final expected row arrives (and manual-finish mode
// is off), loading is finalized automatically.
int LGBM_DatasetPushRows(DatasetHandle dataset,
                         const void* data,
                         int data_type,
                         int32_t nrow,
                         int32_t ncol,
                         int32_t start_row) {
  API_BEGIN();
  auto p_dataset = reinterpret_cast<Dataset*>(dataset);
  auto get_row_fun = RowFunctionFromDenseMatric(data, nrow, ncol, data_type, 1);
  if (p_dataset->has_raw()) {
    // NOTE(review): base of num_numeric_features() as a row-count argument to
    // ResizeRaw looks odd — confirm against Dataset::ResizeRaw's contract.
    p_dataset->ResizeRaw(p_dataset->num_numeric_features() + nrow);
  }
  OMP_INIT_EX();
  #pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(static)
  for (int i = 0; i < nrow; ++i) {
    OMP_LOOP_EX_BEGIN();
    const int tid = omp_get_thread_num();
    auto one_row = get_row_fun(i);
    p_dataset->PushOneRow(tid, start_row + i, one_row);
    OMP_LOOP_EX_END();
  }
  OMP_THROW_EX();
  if (!p_dataset->wait_for_manual_finish() && (start_row + nrow == p_dataset->num_data())) {
    p_dataset->FinishLoad();
  }
  API_END();
}

// Streaming variant of LGBM_DatasetPushRows: pushes a dense row block plus its
// per-row metadata (labels, optional weights / init scores / query ids).
// `tid` is the caller's external thread id, used to keep internal push buffers
// distinct across concurrently streaming client threads.
int LGBM_DatasetPushRowsWithMetadata(DatasetHandle dataset,
                                     const void* data,
                                     int data_type,
                                     int32_t nrow,
                                     int32_t ncol,
                                     int32_t start_row,
                                     const float* labels,
                                     const float* weights,
                                     const double* init_scores,
                                     const int32_t* queries,
                                     int32_t tid) {
  API_BEGIN();
#ifdef LABEL_T_USE_DOUBLE
  Log::Fatal("Don't support LABEL_T_USE_DOUBLE");
#endif
  if (!data) {
    Log::Fatal("data cannot be null.");
  }
  auto p_dataset = reinterpret_cast<Dataset*>(dataset);
  auto get_row_fun = RowFunctionFromDenseMatric(data, nrow, ncol, data_type, 1);
  if (p_dataset->has_raw()) {
    p_dataset->ResizeRaw(p_dataset->num_numeric_features() + nrow);
  }

  const int max_omp_threads = p_dataset->omp_max_threads() > 0 ? p_dataset->omp_max_threads() : OMP_NUM_THREADS();

  OMP_INIT_EX();
#pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(static)
  for (int i = 0; i < nrow; ++i) {
    OMP_LOOP_EX_BEGIN();
    // convert internal thread id to be unique based on external thread id
    const int internal_tid = omp_get_thread_num() + (max_omp_threads * tid);
    auto one_row = get_row_fun(i);
    p_dataset->PushOneRow(internal_tid, start_row + i, one_row);
    OMP_LOOP_EX_END();
  }
  OMP_THROW_EX();

  p_dataset->InsertMetadataAt(start_row, nrow, labels, weights, init_scores, queries);

  if (!p_dataset->wait_for_manual_finish() && (start_row + nrow == p_dataset->num_data())) {
    p_dataset->FinishLoad();
  }
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
1207
// Push a block of sparse (CSR) rows into a pre-sized Dataset starting at
// `start_row`. The unnamed int64_t parameter is accepted for signature
// compatibility but unused here (per the C API header it carries the column
// count — confirm there).
int LGBM_DatasetPushRowsByCSR(DatasetHandle dataset,
                              const void* indptr,
                              int indptr_type,
                              const int32_t* indices,
                              const void* data,
                              int data_type,
                              int64_t nindptr,
                              int64_t nelem,
                              int64_t,
                              int64_t start_row) {
  API_BEGIN();
  auto p_dataset = reinterpret_cast<Dataset*>(dataset);
  auto get_row_fun = RowFunctionFromCSR<int>(indptr, indptr_type, indices, data, data_type, nindptr, nelem);
  // CSR indptr has one more entry than there are rows
  int32_t nrow = static_cast<int32_t>(nindptr - 1);
  if (p_dataset->has_raw()) {
    p_dataset->ResizeRaw(p_dataset->num_numeric_features() + nrow);
  }
  OMP_INIT_EX();
  #pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(static)
  for (int i = 0; i < nrow; ++i) {
    OMP_LOOP_EX_BEGIN();
    const int tid = omp_get_thread_num();
    auto one_row = get_row_fun(i);
    p_dataset->PushOneRow(tid, static_cast<data_size_t>(start_row + i), one_row);
    OMP_LOOP_EX_END();
  }
  OMP_THROW_EX();
  if (!p_dataset->wait_for_manual_finish() && (start_row + nrow == static_cast<int64_t>(p_dataset->num_data()))) {
    p_dataset->FinishLoad();
  }
  API_END();
}

// Streaming variant of LGBM_DatasetPushRowsByCSR: pushes sparse rows plus
// per-row metadata. `tid` is the caller's external thread id used to keep
// internal push buffers distinct across concurrently streaming client threads.
int LGBM_DatasetPushRowsByCSRWithMetadata(DatasetHandle dataset,
                                          const void* indptr,
                                          int indptr_type,
                                          const int32_t* indices,
                                          const void* data,
                                          int data_type,
                                          int64_t nindptr,
                                          int64_t nelem,
                                          int64_t start_row,
                                          const float* labels,
                                          const float* weights,
                                          const double* init_scores,
                                          const int32_t* queries,
                                          int32_t tid) {
  API_BEGIN();
#ifdef LABEL_T_USE_DOUBLE
  Log::Fatal("Don't support LABEL_T_USE_DOUBLE");
#endif
  if (!data) {
    Log::Fatal("data cannot be null.");
  }
  auto p_dataset = reinterpret_cast<Dataset*>(dataset);
  auto get_row_fun = RowFunctionFromCSR<int>(indptr, indptr_type, indices, data, data_type, nindptr, nelem);
  // CSR indptr has one more entry than there are rows
  int32_t nrow = static_cast<int32_t>(nindptr - 1);
  if (p_dataset->has_raw()) {
    p_dataset->ResizeRaw(p_dataset->num_numeric_features() + nrow);
  }

  const int max_omp_threads = p_dataset->omp_max_threads() > 0 ? p_dataset->omp_max_threads() : OMP_NUM_THREADS();

  OMP_INIT_EX();
#pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(static)
  for (int i = 0; i < nrow; ++i) {
    OMP_LOOP_EX_BEGIN();
    // convert internal thread id to be unique based on external thread id
    const int internal_tid = omp_get_thread_num() + (max_omp_threads * tid);
    auto one_row = get_row_fun(i);
    p_dataset->PushOneRow(internal_tid, static_cast<data_size_t>(start_row + i), one_row);
    OMP_LOOP_EX_END();
  }
  OMP_THROW_EX();

  p_dataset->InsertMetadataAt(static_cast<int32_t>(start_row), nrow, labels, weights, init_scores, queries);

  if (!p_dataset->wait_for_manual_finish() && (start_row + nrow == static_cast<int64_t>(p_dataset->num_data()))) {
    p_dataset->FinishLoad();
  }
  API_END();
}

int LGBM_DatasetSetWaitForManualFinish(DatasetHandle dataset, int wait) {
  API_BEGIN();
  // Toggle whether the dataset defers FinishLoad() until an explicit
  // LGBM_DatasetMarkFinished call (used by streaming/push-rows workflows).
  reinterpret_cast<Dataset*>(dataset)->set_wait_for_manual_finish(wait);
  API_END();
}

int LGBM_DatasetMarkFinished(DatasetHandle dataset) {
  API_BEGIN();
  // Explicitly finalize a dataset that was created in manual-finish mode.
  reinterpret_cast<Dataset*>(dataset)->FinishLoad();
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
1304
int LGBM_DatasetCreateFromMat(const void* data,
1305
1306
1307
1308
1309
1310
1311
                              int data_type,
                              int32_t nrow,
                              int32_t ncol,
                              int is_row_major,
                              const char* parameters,
                              const DatasetHandle reference,
                              DatasetHandle* out) {
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
  return LGBM_DatasetCreateFromMats(1,
                                    &data,
                                    data_type,
                                    &nrow,
                                    ncol,
                                    is_row_major,
                                    parameters,
                                    reference,
                                    out);
}

int LGBM_DatasetCreateFromMats(int32_t nmat,
                               const void** data,
                               int data_type,
                               int32_t* nrow,
                               int32_t ncol,
                               int is_row_major,
                               const char* parameters,
                               const DatasetHandle reference,
                               DatasetHandle* out) {
1332
  API_BEGIN();
Guolin Ke's avatar
Guolin Ke committed
1333
1334
  auto param = Config::Str2Map(parameters);
  Config config;
1335
  config.Set(param);
1336
  OMP_SET_NUM_THREADS(config.num_threads);
Guolin Ke's avatar
Guolin Ke committed
1337
  std::unique_ptr<Dataset> ret;
1338
1339
1340
1341
1342
1343
1344
1345
1346
  int32_t total_nrow = 0;
  for (int j = 0; j < nmat; ++j) {
    total_nrow += nrow[j];
  }

  std::vector<std::function<std::vector<double>(int row_idx)>> get_row_fun;
  for (int j = 0; j < nmat; ++j) {
    get_row_fun.push_back(RowFunctionFromDenseMatric(data[j], nrow[j], ncol, data_type, is_row_major));
  }
1347

Guolin Ke's avatar
Guolin Ke committed
1348
1349
  if (reference == nullptr) {
    // sample data first
1350
1351
    auto sample_indices = CreateSampleIndices(total_nrow, config);
    int sample_cnt = static_cast<int>(sample_indices.size());
1352
    std::vector<std::vector<double>> sample_values(ncol);
Guolin Ke's avatar
Guolin Ke committed
1353
    std::vector<std::vector<int>> sample_idx(ncol);
1354
1355
1356

    int offset = 0;
    int j = 0;
Guolin Ke's avatar
Guolin Ke committed
1357
    for (size_t i = 0; i < sample_indices.size(); ++i) {
Guolin Ke's avatar
Guolin Ke committed
1358
      auto idx = sample_indices[i];
1359
1360
1361
1362
      while ((idx - offset) >= nrow[j]) {
        offset += nrow[j];
        ++j;
      }
1363

1364
1365
1366
1367
1368
      auto row = get_row_fun[j](static_cast<int>(idx - offset));
      for (size_t k = 0; k < row.size(); ++k) {
        if (std::fabs(row[k]) > kZeroThreshold || std::isnan(row[k])) {
          sample_values[k].emplace_back(row[k]);
          sample_idx[k].emplace_back(static_cast<int>(i));
Guolin Ke's avatar
Guolin Ke committed
1369
        }
Guolin Ke's avatar
Guolin Ke committed
1370
1371
      }
    }
Guolin Ke's avatar
Guolin Ke committed
1372
    DatasetLoader loader(config, nullptr, 1, nullptr);
1373
1374
1375
1376
    ret.reset(loader.ConstructFromSampleData(Vector2Ptr<double>(&sample_values).data(),
                                             Vector2Ptr<int>(&sample_idx).data(),
                                             ncol,
                                             VectorSize<double>(sample_values).data(),
1377
1378
1379
                                             sample_cnt,
                                             total_nrow,
                                             total_nrow));
Guolin Ke's avatar
Guolin Ke committed
1380
  } else {
1381
    ret.reset(new Dataset(total_nrow));
Guolin Ke's avatar
Guolin Ke committed
1382
    ret->CreateValid(
1383
      reinterpret_cast<const Dataset*>(reference));
1384
1385
1386
    if (ret->has_raw()) {
      ret->ResizeRaw(total_nrow);
    }
Guolin Ke's avatar
Guolin Ke committed
1387
  }
1388
1389
1390
  int32_t start_row = 0;
  for (int j = 0; j < nmat; ++j) {
    OMP_INIT_EX();
1391
    #pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(static)
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
    for (int i = 0; i < nrow[j]; ++i) {
      OMP_LOOP_EX_BEGIN();
      const int tid = omp_get_thread_num();
      auto one_row = get_row_fun[j](i);
      ret->PushOneRow(tid, start_row + i, one_row);
      OMP_LOOP_EX_END();
    }
    OMP_THROW_EX();

    start_row += nrow[j];
Guolin Ke's avatar
Guolin Ke committed
1402
1403
  }
  ret->FinishLoad();
Guolin Ke's avatar
Guolin Ke committed
1404
  *out = ret.release();
1405
  API_END();
1406
1407
}

Guolin Ke's avatar
Guolin Ke committed
1408
int LGBM_DatasetCreateFromCSR(const void* indptr,
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
                              int indptr_type,
                              const int32_t* indices,
                              const void* data,
                              int data_type,
                              int64_t nindptr,
                              int64_t nelem,
                              int64_t num_col,
                              const char* parameters,
                              const DatasetHandle reference,
                              DatasetHandle* out) {
1419
  API_BEGIN();
1420
1421
1422
1423
1424
  if (num_col <= 0) {
    Log::Fatal("The number of columns should be greater than zero.");
  } else if (num_col >= INT32_MAX) {
    Log::Fatal("The number of columns should be smaller than INT32_MAX.");
  }
Guolin Ke's avatar
Guolin Ke committed
1425
1426
  auto param = Config::Str2Map(parameters);
  Config config;
1427
  config.Set(param);
1428
  OMP_SET_NUM_THREADS(config.num_threads);
Guolin Ke's avatar
Guolin Ke committed
1429
  std::unique_ptr<Dataset> ret;
1430
  auto get_row_fun = RowFunctionFromCSR<int>(indptr, indptr_type, indices, data, data_type, nindptr, nelem);
1431
1432
1433
  int32_t nrow = static_cast<int32_t>(nindptr - 1);
  if (reference == nullptr) {
    // sample data first
1434
1435
    auto sample_indices = CreateSampleIndices(nrow, config);
    int sample_cnt = static_cast<int>(sample_indices.size());
Guolin Ke's avatar
Guolin Ke committed
1436
1437
    std::vector<std::vector<double>> sample_values(num_col);
    std::vector<std::vector<int>> sample_idx(num_col);
1438
1439
1440
1441
    for (size_t i = 0; i < sample_indices.size(); ++i) {
      auto idx = sample_indices[i];
      auto row = get_row_fun(static_cast<int>(idx));
      for (std::pair<int, double>& inner_data : row) {
Nikita Titov's avatar
Nikita Titov committed
1442
        CHECK_LT(inner_data.first, num_col);
Guolin Ke's avatar
Guolin Ke committed
1443
        if (std::fabs(inner_data.second) > kZeroThreshold || std::isnan(inner_data.second)) {
Guolin Ke's avatar
Guolin Ke committed
1444
1445
          sample_values[inner_data.first].emplace_back(inner_data.second);
          sample_idx[inner_data.first].emplace_back(static_cast<int>(i));
1446
1447
1448
        }
      }
    }
Guolin Ke's avatar
Guolin Ke committed
1449
    DatasetLoader loader(config, nullptr, 1, nullptr);
1450
1451
1452
1453
    ret.reset(loader.ConstructFromSampleData(Vector2Ptr<double>(&sample_values).data(),
                                             Vector2Ptr<int>(&sample_idx).data(),
                                             static_cast<int>(num_col),
                                             VectorSize<double>(sample_values).data(),
1454
1455
1456
                                             sample_cnt,
                                             nrow,
                                             nrow));
1457
  } else {
1458
    ret.reset(new Dataset(nrow));
Guolin Ke's avatar
Guolin Ke committed
1459
    ret->CreateValid(
1460
      reinterpret_cast<const Dataset*>(reference));
1461
1462
1463
    if (ret->has_raw()) {
      ret->ResizeRaw(nrow);
    }
1464
  }
1465
  OMP_INIT_EX();
1466
  #pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(static)
1467
  for (int i = 0; i < nindptr - 1; ++i) {
1468
    OMP_LOOP_EX_BEGIN();
1469
1470
1471
    const int tid = omp_get_thread_num();
    auto one_row = get_row_fun(i);
    ret->PushOneRow(tid, i, one_row);
1472
    OMP_LOOP_EX_END();
1473
  }
1474
  OMP_THROW_EX();
1475
  ret->FinishLoad();
Guolin Ke's avatar
Guolin Ke committed
1476
  *out = ret.release();
1477
  API_END();
1478
1479
}

1480
int LGBM_DatasetCreateFromCSRFunc(void* get_row_funptr,
1481
1482
1483
1484
1485
                                  int num_rows,
                                  int64_t num_col,
                                  const char* parameters,
                                  const DatasetHandle reference,
                                  DatasetHandle* out) {
1486
  API_BEGIN();
1487
1488
1489
1490
1491
  if (num_col <= 0) {
    Log::Fatal("The number of columns should be greater than zero.");
  } else if (num_col >= INT32_MAX) {
    Log::Fatal("The number of columns should be smaller than INT32_MAX.");
  }
1492
1493
1494
1495
  auto get_row_fun = *static_cast<std::function<void(int idx, std::vector<std::pair<int, double>>&)>*>(get_row_funptr);
  auto param = Config::Str2Map(parameters);
  Config config;
  config.Set(param);
1496
  OMP_SET_NUM_THREADS(config.num_threads);
1497
1498
1499
1500
  std::unique_ptr<Dataset> ret;
  int32_t nrow = num_rows;
  if (reference == nullptr) {
    // sample data first
1501
1502
    auto sample_indices = CreateSampleIndices(nrow, config);
    int sample_cnt = static_cast<int>(sample_indices.size());
1503
1504
1505
1506
1507
1508
1509
1510
    std::vector<std::vector<double>> sample_values(num_col);
    std::vector<std::vector<int>> sample_idx(num_col);
    // local buffer to re-use memory
    std::vector<std::pair<int, double>> buffer;
    for (size_t i = 0; i < sample_indices.size(); ++i) {
      auto idx = sample_indices[i];
      get_row_fun(static_cast<int>(idx), buffer);
      for (std::pair<int, double>& inner_data : buffer) {
Nikita Titov's avatar
Nikita Titov committed
1511
        CHECK_LT(inner_data.first, num_col);
1512
1513
1514
1515
1516
1517
1518
        if (std::fabs(inner_data.second) > kZeroThreshold || std::isnan(inner_data.second)) {
          sample_values[inner_data.first].emplace_back(inner_data.second);
          sample_idx[inner_data.first].emplace_back(static_cast<int>(i));
        }
      }
    }
    DatasetLoader loader(config, nullptr, 1, nullptr);
1519
1520
1521
1522
    ret.reset(loader.ConstructFromSampleData(Vector2Ptr<double>(&sample_values).data(),
                                             Vector2Ptr<int>(&sample_idx).data(),
                                             static_cast<int>(num_col),
                                             VectorSize<double>(sample_values).data(),
1523
1524
1525
                                             sample_cnt,
                                             nrow,
                                             nrow));
1526
1527
1528
1529
  } else {
    ret.reset(new Dataset(nrow));
    ret->CreateValid(
      reinterpret_cast<const Dataset*>(reference));
1530
1531
1532
    if (ret->has_raw()) {
      ret->ResizeRaw(nrow);
    }
1533
  }
1534

1535
  OMP_INIT_EX();
Guolin Ke's avatar
Guolin Ke committed
1536
  std::vector<std::pair<int, double>> thread_buffer;
1537
  #pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(static) private(thread_buffer)
1538
1539
1540
  for (int i = 0; i < num_rows; ++i) {
    OMP_LOOP_EX_BEGIN();
    {
1541
      const int tid = omp_get_thread_num();
Guolin Ke's avatar
Guolin Ke committed
1542
1543
      get_row_fun(i, thread_buffer);
      ret->PushOneRow(tid, i, thread_buffer);
1544
1545
1546
1547
1548
1549
1550
1551
1552
    }
    OMP_LOOP_EX_END();
  }
  OMP_THROW_EX();
  ret->FinishLoad();
  *out = ret.release();
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
1553
int LGBM_DatasetCreateFromCSC(const void* col_ptr,
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
                              int col_ptr_type,
                              const int32_t* indices,
                              const void* data,
                              int data_type,
                              int64_t ncol_ptr,
                              int64_t nelem,
                              int64_t num_row,
                              const char* parameters,
                              const DatasetHandle reference,
                              DatasetHandle* out) {
1564
  API_BEGIN();
Guolin Ke's avatar
Guolin Ke committed
1565
1566
  auto param = Config::Str2Map(parameters);
  Config config;
1567
  config.Set(param);
1568
  OMP_SET_NUM_THREADS(config.num_threads);
Guolin Ke's avatar
Guolin Ke committed
1569
  std::unique_ptr<Dataset> ret;
Guolin Ke's avatar
Guolin Ke committed
1570
1571
1572
  int32_t nrow = static_cast<int32_t>(num_row);
  if (reference == nullptr) {
    // sample data first
1573
1574
    auto sample_indices = CreateSampleIndices(nrow, config);
    int sample_cnt = static_cast<int>(sample_indices.size());
Guolin Ke's avatar
Guolin Ke committed
1575
    std::vector<std::vector<double>> sample_values(ncol_ptr - 1);
Guolin Ke's avatar
Guolin Ke committed
1576
    std::vector<std::vector<int>> sample_idx(ncol_ptr - 1);
1577
    OMP_INIT_EX();
1578
    #pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(static)
Guolin Ke's avatar
Guolin Ke committed
1579
    for (int i = 0; i < static_cast<int>(sample_values.size()); ++i) {
1580
      OMP_LOOP_EX_BEGIN();
Guolin Ke's avatar
Guolin Ke committed
1581
1582
1583
      CSC_RowIterator col_it(col_ptr, col_ptr_type, indices, data, data_type, ncol_ptr, nelem, i);
      for (int j = 0; j < sample_cnt; j++) {
        auto val = col_it.Get(sample_indices[j]);
Guolin Ke's avatar
Guolin Ke committed
1584
        if (std::fabs(val) > kZeroThreshold || std::isnan(val)) {
Guolin Ke's avatar
Guolin Ke committed
1585
1586
          sample_values[i].emplace_back(val);
          sample_idx[i].emplace_back(j);
Guolin Ke's avatar
Guolin Ke committed
1587
1588
        }
      }
1589
      OMP_LOOP_EX_END();
Guolin Ke's avatar
Guolin Ke committed
1590
    }
1591
    OMP_THROW_EX();
Guolin Ke's avatar
Guolin Ke committed
1592
    DatasetLoader loader(config, nullptr, 1, nullptr);
1593
1594
1595
1596
    ret.reset(loader.ConstructFromSampleData(Vector2Ptr<double>(&sample_values).data(),
                                             Vector2Ptr<int>(&sample_idx).data(),
                                             static_cast<int>(sample_values.size()),
                                             VectorSize<double>(sample_values).data(),
1597
1598
1599
                                             sample_cnt,
                                             nrow,
                                             nrow));
Guolin Ke's avatar
Guolin Ke committed
1600
  } else {
1601
    ret.reset(new Dataset(nrow));
Guolin Ke's avatar
Guolin Ke committed
1602
    ret->CreateValid(
1603
      reinterpret_cast<const Dataset*>(reference));
Guolin Ke's avatar
Guolin Ke committed
1604
  }
1605
  OMP_INIT_EX();
1606
  #pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(static)
Guolin Ke's avatar
Guolin Ke committed
1607
  for (int i = 0; i < ncol_ptr - 1; ++i) {
1608
    OMP_LOOP_EX_BEGIN();
Guolin Ke's avatar
Guolin Ke committed
1609
    const int tid = omp_get_thread_num();
Guolin Ke's avatar
Guolin Ke committed
1610
    int feature_idx = ret->InnerFeatureIndex(i);
Guolin Ke's avatar
Guolin Ke committed
1611
    if (feature_idx < 0) { continue; }
Guolin Ke's avatar
Guolin Ke committed
1612
1613
    int group = ret->Feature2Group(feature_idx);
    int sub_feature = ret->Feture2SubFeature(feature_idx);
Guolin Ke's avatar
Guolin Ke committed
1614
    CSC_RowIterator col_it(col_ptr, col_ptr_type, indices, data, data_type, ncol_ptr, nelem, i);
Guolin Ke's avatar
Guolin Ke committed
1615
1616
1617
1618
1619
1620
1621
1622
    auto bin_mapper = ret->FeatureBinMapper(feature_idx);
    if (bin_mapper->GetDefaultBin() == bin_mapper->GetMostFreqBin()) {
      int row_idx = 0;
      while (row_idx < nrow) {
        auto pair = col_it.NextNonZero();
        row_idx = pair.first;
        // no more data
        if (row_idx < 0) { break; }
1623
        ret->PushOneData(tid, row_idx, group, feature_idx, sub_feature, pair.second);
Guolin Ke's avatar
Guolin Ke committed
1624
1625
1626
1627
      }
    } else {
      for (int row_idx = 0; row_idx < nrow; ++row_idx) {
        auto val = col_it.Get(row_idx);
1628
        ret->PushOneData(tid, row_idx, group, feature_idx, sub_feature, val);
Guolin Ke's avatar
Guolin Ke committed
1629
      }
Guolin Ke's avatar
Guolin Ke committed
1630
    }
1631
    OMP_LOOP_EX_END();
Guolin Ke's avatar
Guolin Ke committed
1632
  }
1633
  OMP_THROW_EX();
Guolin Ke's avatar
Guolin Ke committed
1634
  ret->FinishLoad();
Guolin Ke's avatar
Guolin Ke committed
1635
  *out = ret.release();
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
  API_END();
}

int LGBM_DatasetCreateFromArrow(int64_t n_chunks,
                                const ArrowArray* chunks,
                                const ArrowSchema* schema,
                                const char* parameters,
                                const DatasetHandle reference,
                                DatasetHandle *out) {
  API_BEGIN();

  auto param = Config::Str2Map(parameters);
  Config config;
  config.Set(param);
  OMP_SET_NUM_THREADS(config.num_threads);

  std::unique_ptr<Dataset> ret;

  // Prepare the Arrow data
  ArrowTable table(n_chunks, chunks, schema);

  // Initialize the dataset
  if (reference == nullptr) {
    // If there is no reference dataset, we first sample indices
    auto sample_indices = CreateSampleIndices(static_cast<int32_t>(table.get_num_rows()), config);
    auto sample_count = static_cast<int>(sample_indices.size());
    std::vector<std::vector<double>> sample_values(table.get_num_columns());
    std::vector<std::vector<int>> sample_idx(table.get_num_columns());

    // Then, we obtain sample values by parallelizing across columns
    OMP_INIT_EX();
    #pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(static)
    for (int64_t j = 0; j < table.get_num_columns(); ++j) {
      OMP_LOOP_EX_BEGIN();

      // Values need to be copied from the record batches.
      sample_values[j].reserve(sample_indices.size());
      sample_idx[j].reserve(sample_indices.size());

      // The chunks are iterated over in the inner loop as columns can be treated independently.
      int last_idx = 0;
      int i = 0;
      auto it = table.get_column(j).begin<double>();
      for (auto idx : sample_indices) {
        std::advance(it, idx - last_idx);
        auto v = *it;
        if (std::fabs(v) > kZeroThreshold || std::isnan(v)) {
          sample_values[j].emplace_back(v);
          sample_idx[j].emplace_back(i);
        }
        last_idx = idx;
        i++;
      }
      OMP_LOOP_EX_END();
    }
    OMP_THROW_EX();

    // Finally, we initialize a loader from the sampled values
    DatasetLoader loader(config, nullptr, 1, nullptr);
    ret.reset(loader.ConstructFromSampleData(Vector2Ptr<double>(&sample_values).data(),
                                             Vector2Ptr<int>(&sample_idx).data(),
                                             table.get_num_columns(),
                                             VectorSize<double>(sample_values).data(),
                                             sample_count,
                                             table.get_num_rows(),
                                             table.get_num_rows()));
  } else {
    ret.reset(new Dataset(static_cast<data_size_t>(table.get_num_rows())));
    ret->CreateValid(reinterpret_cast<const Dataset*>(reference));
    if (ret->has_raw()) {
      ret->ResizeRaw(static_cast<int>(table.get_num_rows()));
    }
  }

  // After sampling and properly initializing all bins, we can add our data to the dataset. Here,
  // we parallelize across rows.
  OMP_INIT_EX();
  #pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(static)
  for (int64_t j = 0; j < table.get_num_columns(); ++j) {
    OMP_LOOP_EX_BEGIN();
    const int tid = omp_get_thread_num();
    data_size_t idx = 0;
    auto column = table.get_column(j);
    for (auto it = column.begin<double>(), end = column.end<double>(); it != end; ++it) {
      ret->PushOneValue(tid, idx++, j, *it);
    }
    OMP_LOOP_EX_END();
  }
  OMP_THROW_EX();

  ret->FinishLoad();
  *out = ret.release();
1728
  API_END();
Guolin Ke's avatar
Guolin Ke committed
1729
1730
}

Guolin Ke's avatar
Guolin Ke committed
1731
int LGBM_DatasetGetSubset(
1732
  const DatasetHandle handle,
wxchan's avatar
wxchan committed
1733
1734
1735
  const int32_t* used_row_indices,
  int32_t num_used_row_indices,
  const char* parameters,
Guolin Ke's avatar
typo  
Guolin Ke committed
1736
  DatasetHandle* out) {
wxchan's avatar
wxchan committed
1737
  API_BEGIN();
Guolin Ke's avatar
Guolin Ke committed
1738
1739
  auto param = Config::Str2Map(parameters);
  Config config;
1740
  config.Set(param);
1741
  OMP_SET_NUM_THREADS(config.num_threads);
1742
  auto full_dataset = reinterpret_cast<const Dataset*>(handle);
1743
  CHECK_GT(num_used_row_indices, 0);
1744
1745
  const int32_t lower = 0;
  const int32_t upper = full_dataset->num_data() - 1;
1746
  CheckElementsIntervalClosed(used_row_indices, lower, upper, num_used_row_indices, "Used indices of subset");
1747
1748
1749
  if (!std::is_sorted(used_row_indices, used_row_indices + num_used_row_indices)) {
    Log::Fatal("used_row_indices should be sorted in Subset");
  }
Guolin Ke's avatar
Guolin Ke committed
1750
  auto ret = std::unique_ptr<Dataset>(new Dataset(num_used_row_indices));
1751
  ret->CopyFeatureMapperFrom(full_dataset);
1752
  ret->CopySubrow(full_dataset, used_row_indices, num_used_row_indices, true);
wxchan's avatar
wxchan committed
1753
1754
1755
1756
  *out = ret.release();
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
1757
int LGBM_DatasetSetFeatureNames(
Guolin Ke's avatar
typo  
Guolin Ke committed
1758
  DatasetHandle handle,
Guolin Ke's avatar
Guolin Ke committed
1759
  const char** feature_names,
Guolin Ke's avatar
Guolin Ke committed
1760
  int num_feature_names) {
Guolin Ke's avatar
Guolin Ke committed
1761
1762
1763
  API_BEGIN();
  auto dataset = reinterpret_cast<Dataset*>(handle);
  std::vector<std::string> feature_names_str;
Guolin Ke's avatar
Guolin Ke committed
1764
  for (int i = 0; i < num_feature_names; ++i) {
Guolin Ke's avatar
Guolin Ke committed
1765
1766
1767
1768
1769
1770
    feature_names_str.emplace_back(feature_names[i]);
  }
  dataset->set_feature_names(feature_names_str);
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
1771
int LGBM_DatasetGetFeatureNames(
1772
1773
1774
1775
1776
1777
    DatasetHandle handle,
    const int len,
    int* num_feature_names,
    const size_t buffer_len,
    size_t* out_buffer_len,
    char** feature_names) {
1778
  API_BEGIN();
1779
  *out_buffer_len = 0;
1780
1781
  auto dataset = reinterpret_cast<Dataset*>(handle);
  auto inside_feature_name = dataset->feature_names();
Guolin Ke's avatar
Guolin Ke committed
1782
1783
  *num_feature_names = static_cast<int>(inside_feature_name.size());
  for (int i = 0; i < *num_feature_names; ++i) {
1784
1785
1786
1787
1788
    if (i < len) {
      std::memcpy(feature_names[i], inside_feature_name[i].c_str(), std::min(inside_feature_name[i].size() + 1, buffer_len));
      feature_names[i][buffer_len - 1] = '\0';
    }
    *out_buffer_len = std::max(inside_feature_name[i].size() + 1, *out_buffer_len);
1789
1790
1791
1792
  }
  API_END();
}

1793
1794
1795
#ifdef _MSC_VER
  #pragma warning(disable : 4702)
#endif
Guolin Ke's avatar
Guolin Ke committed
1796
int LGBM_DatasetFree(DatasetHandle handle) {
1797
  API_BEGIN();
Guolin Ke's avatar
Guolin Ke committed
1798
  delete reinterpret_cast<Dataset*>(handle);
1799
  API_END();
1800
1801
}

Guolin Ke's avatar
Guolin Ke committed
1802
int LGBM_DatasetSaveBinary(DatasetHandle handle,
1803
                           const char* filename) {
1804
  API_BEGIN();
Guolin Ke's avatar
Guolin Ke committed
1805
1806
  auto dataset = reinterpret_cast<Dataset*>(handle);
  dataset->SaveBinaryFile(filename);
1807
  API_END();
1808
1809
}

1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
int LGBM_DatasetSerializeReferenceToBinary(DatasetHandle handle,
                                           ByteBufferHandle* out,
                                           int32_t* out_len) {
  API_BEGIN();
  auto dataset = reinterpret_cast<Dataset*>(handle);
  std::unique_ptr<LightGBM::ByteBuffer> ret;
  ret.reset(new LightGBM::ByteBuffer());
  dataset->SerializeReference(ret.get());
  *out_len = static_cast<int32_t>(ret->GetSize());
  *out = ret.release();
  API_END();
}

1823
1824
1825
1826
1827
1828
1829
1830
int LGBM_DatasetDumpText(DatasetHandle handle,
                         const char* filename) {
  API_BEGIN();
  auto dataset = reinterpret_cast<Dataset*>(handle);
  dataset->DumpTextFile(filename);
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
1831
int LGBM_DatasetSetField(DatasetHandle handle,
1832
1833
1834
1835
                         const char* field_name,
                         const void* field_data,
                         int num_element,
                         int type) {
1836
  API_BEGIN();
Guolin Ke's avatar
Guolin Ke committed
1837
  auto dataset = reinterpret_cast<Dataset*>(handle);
1838
  bool is_success = false;
Guolin Ke's avatar
Guolin Ke committed
1839
  if (type == C_API_DTYPE_FLOAT32) {
Guolin Ke's avatar
Guolin Ke committed
1840
    is_success = dataset->SetFloatField(field_name, reinterpret_cast<const float*>(field_data), static_cast<int32_t>(num_element));
Guolin Ke's avatar
Guolin Ke committed
1841
  } else if (type == C_API_DTYPE_INT32) {
Guolin Ke's avatar
Guolin Ke committed
1842
    is_success = dataset->SetIntField(field_name, reinterpret_cast<const int*>(field_data), static_cast<int32_t>(num_element));
Guolin Ke's avatar
Guolin Ke committed
1843
1844
  } else if (type == C_API_DTYPE_FLOAT64) {
    is_success = dataset->SetDoubleField(field_name, reinterpret_cast<const double*>(field_data), static_cast<int32_t>(num_element));
1845
  }
1846
  if (!is_success) { Log::Fatal("Input data type error or field not found"); }
1847
  API_END();
1848
1849
}

1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
int LGBM_DatasetSetFieldFromArrow(DatasetHandle handle,
                                  const char* field_name,
                                  int64_t n_chunks,
                                  const ArrowArray* chunks,
                                  const ArrowSchema* schema) {
  API_BEGIN();
  auto dataset = reinterpret_cast<Dataset*>(handle);
  ArrowChunkedArray ca(n_chunks, chunks, schema);
  auto is_success = dataset->SetFieldFromArrow(field_name, ca);
  if (!is_success) {
    Log::Fatal("Input field is not supported");
  }
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
1865
int LGBM_DatasetGetField(DatasetHandle handle,
1866
1867
1868
1869
                         const char* field_name,
                         int* out_len,
                         const void** out_ptr,
                         int* out_type) {
1870
  API_BEGIN();
Guolin Ke's avatar
Guolin Ke committed
1871
  auto dataset = reinterpret_cast<Dataset*>(handle);
1872
  bool is_success = false;
Guolin Ke's avatar
Guolin Ke committed
1873
  if (dataset->GetFloatField(field_name, out_len, reinterpret_cast<const float**>(out_ptr))) {
Guolin Ke's avatar
Guolin Ke committed
1874
    *out_type = C_API_DTYPE_FLOAT32;
1875
    is_success = true;
Guolin Ke's avatar
Guolin Ke committed
1876
  } else if (dataset->GetIntField(field_name, out_len, reinterpret_cast<const int**>(out_ptr))) {
Guolin Ke's avatar
Guolin Ke committed
1877
    *out_type = C_API_DTYPE_INT32;
1878
    is_success = true;
Guolin Ke's avatar
Guolin Ke committed
1879
1880
1881
  } else if (dataset->GetDoubleField(field_name, out_len, reinterpret_cast<const double**>(out_ptr))) {
    *out_type = C_API_DTYPE_FLOAT64;
    is_success = true;
Nikita Titov's avatar
Nikita Titov committed
1882
  }
1883
  if (!is_success) { Log::Fatal("Field not found"); }
wxchan's avatar
wxchan committed
1884
  if (*out_ptr == nullptr) { *out_len = 0; }
1885
  API_END();
1886
1887
}

1888
int LGBM_DatasetUpdateParamChecking(const char* old_parameters, const char* new_parameters) {
1889
  API_BEGIN();
1890
1891
1892
1893
1894
  auto old_param = Config::Str2Map(old_parameters);
  Config old_config;
  old_config.Set(old_param);
  auto new_param = Config::Str2Map(new_parameters);
  Booster::CheckDatasetResetConfig(old_config, new_param);
1895
1896
1897
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
1898
int LGBM_DatasetGetNumData(DatasetHandle handle,
1899
                           int* out) {
1900
  API_BEGIN();
Guolin Ke's avatar
Guolin Ke committed
1901
1902
  auto dataset = reinterpret_cast<Dataset*>(handle);
  *out = dataset->num_data();
1903
  API_END();
1904
1905
}

Guolin Ke's avatar
Guolin Ke committed
1906
int LGBM_DatasetGetNumFeature(DatasetHandle handle,
1907
                              int* out) {
1908
  API_BEGIN();
Guolin Ke's avatar
Guolin Ke committed
1909
1910
  auto dataset = reinterpret_cast<Dataset*>(handle);
  *out = dataset->num_total_features();
1911
  API_END();
Guolin Ke's avatar
Guolin Ke committed
1912
}
1913

1914
1915
1916
1917
1918
int LGBM_DatasetGetFeatureNumBin(DatasetHandle handle,
                                 int feature,
                                 int* out) {
  API_BEGIN();
  auto dataset = reinterpret_cast<Dataset*>(handle);
1919
1920
1921
1922
1923
  int num_features = dataset->num_total_features();
  if (feature < 0 || feature >= num_features) {
    Log::Fatal("Tried to retrieve number of bins for feature index %d, "
               "but the valid feature indices are [0, %d].", feature, num_features - 1);
  }
1924
1925
1926
1927
1928
1929
1930
1931
1932
  int inner_idx = dataset->InnerFeatureIndex(feature);
  if (inner_idx >= 0) {
    *out = dataset->FeatureNumBin(inner_idx);
  } else {
    *out = 0;
  }
  API_END();
}

1933
1934
1935
1936
1937
int LGBM_DatasetAddFeaturesFrom(DatasetHandle target,
                                DatasetHandle source) {
  API_BEGIN();
  auto target_d = reinterpret_cast<Dataset*>(target);
  auto source_d = reinterpret_cast<Dataset*>(source);
1938
  target_d->AddFeaturesFrom(source_d);
1939
1940
1941
  API_END();
}

1942
1943
// ---- start of booster

Guolin Ke's avatar
Guolin Ke committed
1944
int LGBM_BoosterCreate(const DatasetHandle train_data,
1945
1946
                       const char* parameters,
                       BoosterHandle* out) {
1947
  API_BEGIN();
Guolin Ke's avatar
Guolin Ke committed
1948
  const Dataset* p_train_data = reinterpret_cast<const Dataset*>(train_data);
wxchan's avatar
wxchan committed
1949
1950
  auto ret = std::unique_ptr<Booster>(new Booster(p_train_data, parameters));
  *out = ret.release();
1951
  API_END();
1952
1953
}

Guolin Ke's avatar
Guolin Ke committed
1954
int LGBM_BoosterCreateFromModelfile(
1955
  const char* filename,
Guolin Ke's avatar
Guolin Ke committed
1956
  int* out_num_iterations,
1957
  BoosterHandle* out) {
1958
  API_BEGIN();
wxchan's avatar
wxchan committed
1959
  auto ret = std::unique_ptr<Booster>(new Booster(filename));
Guolin Ke's avatar
Guolin Ke committed
1960
  *out_num_iterations = ret->GetBoosting()->GetCurrentIteration();
wxchan's avatar
wxchan committed
1961
  *out = ret.release();
1962
  API_END();
1963
1964
}

Guolin Ke's avatar
Guolin Ke committed
1965
int LGBM_BoosterLoadModelFromString(
1966
1967
1968
1969
  const char* model_str,
  int* out_num_iterations,
  BoosterHandle* out) {
  API_BEGIN();
wxchan's avatar
wxchan committed
1970
  auto ret = std::unique_ptr<Booster>(new Booster(nullptr));
1971
1972
1973
1974
1975
1976
  ret->LoadModelFromString(model_str);
  *out_num_iterations = ret->GetBoosting()->GetCurrentIteration();
  *out = ret.release();
  API_END();
}

1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
/*!
 * \brief Copy the parameter string stored with the model into a caller buffer.
 * \param handle Booster handle
 * \param buffer_len Size of `out_str` in bytes
 * \param[out] out_len Receives the required size including the terminating '\0'
 * \param[out] out_str Filled only when `buffer_len` is large enough
 * \return 0 on success, -1 on error
 */
int LGBM_BoosterGetLoadedParam(
  BoosterHandle handle,
  int64_t buffer_len,
  int64_t* out_len,
  char* out_str) {
  API_BEGIN();
  auto* booster = reinterpret_cast<Booster*>(handle);
  const std::string params = booster->GetBoosting()->GetLoadedParam();
  const int64_t required = static_cast<int64_t>(params.size()) + 1;  // +1 for '\0'
  *out_len = required;
  if (required <= buffer_len) {
    std::memcpy(out_str, params.c_str(), required);
  }
  API_END();
}

1992
1993
1994
#ifdef _MSC_VER
  #pragma warning(disable : 4702)
#endif
Guolin Ke's avatar
Guolin Ke committed
1995
/*!
 * \brief Destroy a Booster and release its resources.
 * \param handle Booster handle previously created by one of the create functions
 * \return 0 on success, -1 on error
 */
int LGBM_BoosterFree(BoosterHandle handle) {
  API_BEGIN();
  Booster* booster = reinterpret_cast<Booster*>(handle);
  delete booster;
  API_END();
}

2001
/*!
 * \brief Shuffle the trees of a Booster within [start_iter, end_iter).
 * \param handle Booster handle
 * \param start_iter First iteration of the shuffled range
 * \param end_iter One-past-last iteration of the shuffled range
 * \return 0 on success, -1 on error
 */
int LGBM_BoosterShuffleModels(BoosterHandle handle, int start_iter, int end_iter) {
  API_BEGIN();
  auto* booster = reinterpret_cast<Booster*>(handle);
  booster->ShuffleModels(start_iter, end_iter);
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
2008
/*!
 * \brief Merge the model of `other_handle` into `handle`.
 * \param handle Booster that receives the merged trees
 * \param other_handle Booster whose model is merged in (not modified)
 * \return 0 on success, -1 on error
 */
int LGBM_BoosterMerge(BoosterHandle handle,
                      BoosterHandle other_handle) {
  API_BEGIN();
  auto* dst = reinterpret_cast<Booster*>(handle);
  auto* src = reinterpret_cast<Booster*>(other_handle);
  dst->MergeFrom(src);
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
2017
int LGBM_BoosterAddValidData(BoosterHandle handle,
2018
                             const DatasetHandle valid_data) {
wxchan's avatar
wxchan committed
2019
2020
2021
2022
2023
2024
2025
  API_BEGIN();
  Booster* ref_booster = reinterpret_cast<Booster*>(handle);
  const Dataset* p_dataset = reinterpret_cast<const Dataset*>(valid_data);
  ref_booster->AddValidData(p_dataset);
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
2026
int LGBM_BoosterResetTrainingData(BoosterHandle handle,
2027
                                  const DatasetHandle train_data) {
wxchan's avatar
wxchan committed
2028
2029
2030
2031
2032
2033
2034
  API_BEGIN();
  Booster* ref_booster = reinterpret_cast<Booster*>(handle);
  const Dataset* p_dataset = reinterpret_cast<const Dataset*>(train_data);
  ref_booster->ResetTrainingData(p_dataset);
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
2035
/*!
 * \brief Re-apply a parameter string to an existing Booster.
 * \param handle Booster handle
 * \param parameters Parameter string in "key=value key2=value2" format
 * \return 0 on success, -1 on error
 */
int LGBM_BoosterResetParameter(BoosterHandle handle, const char* parameters) {
  API_BEGIN();
  auto* booster = reinterpret_cast<Booster*>(handle);
  booster->ResetConfig(parameters);
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
2042
/*!
 * \brief Query the number of classes of the Booster's objective.
 * \param handle Booster handle
 * \param[out] out_len Receives the class count
 * \return 0 on success, -1 on error
 */
int LGBM_BoosterGetNumClasses(BoosterHandle handle, int* out_len) {
  API_BEGIN();
  auto* booster = reinterpret_cast<Booster*>(handle);
  *out_len = booster->GetBoosting()->NumberOfClasses();
  API_END();
}

2049
/*!
 * \brief Query whether the Booster uses linear trees.
 * \param handle Booster handle
 * \param[out] out Receives 1 when the model has linear trees, 0 otherwise
 * \return 0 on success, -1 on error
 */
int LGBM_BoosterGetLinear(BoosterHandle handle, int* out) {
  API_BEGIN();
  auto* booster = reinterpret_cast<Booster*>(handle);
  *out = booster->GetBoosting()->IsLinear() ? 1 : 0;
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
2060
2061
2062
2063
2064
2065
2066
/*!
 * \brief Refit the leaf values of the existing tree structure from new leaf assignments.
 * \param handle Booster handle
 * \param leaf_preds Row-major matrix of leaf indices (nrow x ncol)
 * \param nrow Number of rows in `leaf_preds`
 * \param ncol Number of columns in `leaf_preds`
 * \return 0 on success, -1 on error
 */
int LGBM_BoosterRefit(BoosterHandle handle, const int32_t* leaf_preds, int32_t nrow, int32_t ncol) {
  API_BEGIN();
  auto* booster = reinterpret_cast<Booster*>(handle);
  booster->Refit(leaf_preds, nrow, ncol);
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
2067
/*!
 * \brief Perform one boosting iteration using the configured objective.
 * \param handle Booster handle
 * \param[out] is_finished Receives 1 when training cannot continue (e.g. no further split gain), else 0
 * \return 0 on success, -1 on error
 */
int LGBM_BoosterUpdateOneIter(BoosterHandle handle, int* is_finished) {
  API_BEGIN();
  auto* booster = reinterpret_cast<Booster*>(handle);
  *is_finished = booster->TrainOneIter() ? 1 : 0;
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
2078
/*!
 * \brief Perform one boosting iteration with externally supplied gradients/hessians.
 * \param handle Booster handle
 * \param grad Pointer to first-order gradients, one per (row, class)
 * \param hess Pointer to second-order gradients, one per (row, class)
 * \param[out] is_finished Receives 1 when training cannot continue, else 0
 * \return 0 on success, -1 on error
 * \note Unsupported when the library is built with SCORE_T_USE_DOUBLE.
 */
int LGBM_BoosterUpdateOneIterCustom(BoosterHandle handle,
                                    const float* grad,
                                    const float* hess,
                                    int* is_finished) {
  API_BEGIN();
  #ifdef SCORE_T_USE_DOUBLE
  // Custom float gradients cannot be used with double-precision scores.
  (void) handle;       // UNUSED VARIABLE
  (void) grad;         // UNUSED VARIABLE
  (void) hess;         // UNUSED VARIABLE
  (void) is_finished;  // UNUSED VARIABLE
  Log::Fatal("Don't support custom loss function when SCORE_T_USE_DOUBLE is enabled");
  #else
  auto* booster = reinterpret_cast<Booster*>(handle);
  *is_finished = booster->TrainOneIter(grad, hess) ? 1 : 0;
  #endif
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
2100
/*!
 * \brief Undo the most recent boosting iteration.
 * \param handle Booster handle
 * \return 0 on success, -1 on error
 */
int LGBM_BoosterRollbackOneIter(BoosterHandle handle) {
  API_BEGIN();
  auto* booster = reinterpret_cast<Booster*>(handle);
  booster->RollbackOneIter();
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
2107
/*!
 * \brief Query the number of completed boosting iterations.
 * \param handle Booster handle
 * \param[out] out_iteration Receives the current iteration count
 * \return 0 on success, -1 on error
 */
int LGBM_BoosterGetCurrentIteration(BoosterHandle handle, int* out_iteration) {
  API_BEGIN();
  auto* booster = reinterpret_cast<Booster*>(handle);
  *out_iteration = booster->GetBoosting()->GetCurrentIteration();
  API_END();
}
Guolin Ke's avatar
Guolin Ke committed
2113

2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
/*!
 * \brief Query how many trees are added per boosting iteration (e.g. one per class).
 * \param handle Booster handle
 * \param[out] out_tree_per_iteration Receives the tree count per iteration
 * \return 0 on success, -1 on error
 */
int LGBM_BoosterNumModelPerIteration(BoosterHandle handle, int* out_tree_per_iteration) {
  API_BEGIN();
  auto* booster = reinterpret_cast<Booster*>(handle);
  *out_tree_per_iteration = booster->GetBoosting()->NumModelPerIteration();
  API_END();
}

/*!
 * \brief Query the total number of trees in the model.
 * \param handle Booster handle
 * \param[out] out_models Receives the total model (tree) count
 * \return 0 on success, -1 on error
 */
int LGBM_BoosterNumberOfTotalModel(BoosterHandle handle, int* out_models) {
  API_BEGIN();
  auto* booster = reinterpret_cast<Booster*>(handle);
  *out_models = booster->GetBoosting()->NumberOfTotalModel();
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
2128
/*!
 * \brief Query the number of evaluation metrics configured on the Booster.
 * \param handle Booster handle
 * \param[out] out_len Receives the metric count
 * \return 0 on success, -1 on error
 */
int LGBM_BoosterGetEvalCounts(BoosterHandle handle, int* out_len) {
  API_BEGIN();
  auto* booster = reinterpret_cast<Booster*>(handle);
  *out_len = booster->GetEvalCounts();
  API_END();
}

2135
2136
2137
2138
2139
2140
/*!
 * \brief Copy evaluation metric names into caller-provided string buffers.
 * \param handle Booster handle
 * \param len Number of entries in `out_strs`
 * \param[out] out_len Receives the number of metric names
 * \param buffer_len Size of each buffer in `out_strs`
 * \param[out] out_buffer_len Receives the size needed to hold the longest name
 * \param[out] out_strs Array of `len` char buffers, each `buffer_len` bytes
 * \return 0 on success, -1 on error
 */
int LGBM_BoosterGetEvalNames(BoosterHandle handle,
                             const int len,
                             int* out_len,
                             const size_t buffer_len,
                             size_t* out_buffer_len,
                             char** out_strs) {
  API_BEGIN();
  auto* booster = reinterpret_cast<Booster*>(handle);
  *out_len = booster->GetEvalNames(out_strs, len, buffer_len, out_buffer_len);
  API_END();
}

2147
2148
2149
2150
2151
2152
/*!
 * \brief Copy feature names into caller-provided string buffers.
 * \param handle Booster handle
 * \param len Number of entries in `out_strs`
 * \param[out] out_len Receives the number of feature names
 * \param buffer_len Size of each buffer in `out_strs`
 * \param[out] out_buffer_len Receives the size needed to hold the longest name
 * \param[out] out_strs Array of `len` char buffers, each `buffer_len` bytes
 * \return 0 on success, -1 on error
 */
int LGBM_BoosterGetFeatureNames(BoosterHandle handle,
                                const int len,
                                int* out_len,
                                const size_t buffer_len,
                                size_t* out_buffer_len,
                                char** out_strs) {
  API_BEGIN();
  auto* booster = reinterpret_cast<Booster*>(handle);
  *out_len = booster->GetFeatureNames(out_strs, len, buffer_len, out_buffer_len);
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
2159
/*!
 * \brief Query the number of features the model was trained with.
 * \param handle Booster handle
 * \param[out] out_len Receives the feature count (max feature index + 1)
 * \return 0 on success, -1 on error
 */
int LGBM_BoosterGetNumFeature(BoosterHandle handle, int* out_len) {
  API_BEGIN();
  auto* booster = reinterpret_cast<Booster*>(handle);
  *out_len = booster->GetBoosting()->MaxFeatureIdx() + 1;
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
2166
/*!
 * \brief Copy the latest evaluation results for one data partition.
 * \param handle Booster handle
 * \param data_idx 0 for the training data, >0 for the corresponding validation data
 * \param[out] out_len Receives the number of results written
 * \param[out] out_results Caller buffer; must hold at least GetEvalCounts() values
 * \return 0 on success, -1 on error
 */
int LGBM_BoosterGetEval(BoosterHandle handle,
                        int data_idx,
                        int* out_len,
                        double* out_results) {
  API_BEGIN();
  auto* booster = reinterpret_cast<Booster*>(handle);
  const auto scores = booster->GetBoosting()->GetEvalAt(data_idx);
  *out_len = static_cast<int>(scores.size());
  int pos = 0;
  for (const auto& score : scores) {
    out_results[pos++] = static_cast<double>(score);
  }
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
2181
int LGBM_BoosterGetNumPredict(BoosterHandle handle,
2182
2183
                              int data_idx,
                              int64_t* out_len) {
Guolin Ke's avatar
Guolin Ke committed
2184
2185
2186
2187
2188
2189
  API_BEGIN();
  auto boosting = reinterpret_cast<Booster*>(handle)->GetBoosting();
  *out_len = boosting->GetNumPredictAt(data_idx);
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
2190
int LGBM_BoosterGetPredict(BoosterHandle handle,
2191
2192
2193
                           int data_idx,
                           int64_t* out_len,
                           double* out_result) {
2194
  API_BEGIN();
Guolin Ke's avatar
Guolin Ke committed
2195
  Booster* ref_booster = reinterpret_cast<Booster*>(handle);
2196
  ref_booster->GetPredictAt(data_idx, out_result, out_len);
2197
  API_END();
Guolin Ke's avatar
Guolin Ke committed
2198
2199
}

Guolin Ke's avatar
Guolin Ke committed
2200
int LGBM_BoosterPredictForFile(BoosterHandle handle,
2201
2202
2203
                               const char* data_filename,
                               int data_has_header,
                               int predict_type,
2204
                               int start_iteration,
2205
                               int num_iteration,
2206
                               const char* parameter,
2207
                               const char* result_filename) {
2208
  API_BEGIN();
Guolin Ke's avatar
Guolin Ke committed
2209
2210
  auto param = Config::Str2Map(parameter);
  Config config;
Guolin Ke's avatar
Guolin Ke committed
2211
  config.Set(param);
2212
  OMP_SET_NUM_THREADS(config.num_threads);
Guolin Ke's avatar
Guolin Ke committed
2213
  Booster* ref_booster = reinterpret_cast<Booster*>(handle);
2214
  ref_booster->Predict(start_iteration, num_iteration, predict_type, data_filename, data_has_header,
Guolin Ke's avatar
Guolin Ke committed
2215
                       config, result_filename);
2216
  API_END();
2217
2218
}

Guolin Ke's avatar
Guolin Ke committed
2219
/*!
 * \brief Compute the size of the output buffer a prediction call will need.
 * \param handle Booster handle
 * \param num_row Number of rows that will be predicted
 * \param predict_type One of the C_API_PREDICT_* constants
 * \param start_iteration First iteration used for prediction
 * \param num_iteration Number of iterations used; <= 0 means all
 * \param[out] out_len Receives num_row * values-per-row
 * \return 0 on success, -1 on error
 */
int LGBM_BoosterCalcNumPredict(BoosterHandle handle,
                               int num_row,
                               int predict_type,
                               int start_iteration,
                               int num_iteration,
                               int64_t* out_len) {
  API_BEGIN();
  auto* booster = reinterpret_cast<Booster*>(handle);
  const bool is_leaf = (predict_type == C_API_PREDICT_LEAF_INDEX);
  const bool is_contrib = (predict_type == C_API_PREDICT_CONTRIB);
  *out_len = static_cast<int64_t>(num_row) *
    booster->GetBoosting()->NumPredictOneRow(start_iteration, num_iteration, is_leaf, is_contrib);
  API_END();
}

2232
2233
2234
2235
2236
2237
// Naming: In future versions of LightGBM, public API named around `FastConfig` should be made named around
// `SingleRowPredictor`, because it is specific to single row prediction, and doesn't actually hold only config.
// For now this is kept as `FastConfig` for backwards compatibility.
// At the same time, one should consider removing the old non-fast single row public API that stores its Predictor
// in the Booster, because that will enable removing these Predictors from the Booster, and associated initialization
// code.
2238
2239
/*!
 * \brief Release a fast-predict configuration created by one of the *FastInit functions.
 * \param fastConfig Handle returned by LGBM_BoosterPredictFor*SingleRowFastInit
 * \return 0 on success, -1 on error
 */
int LGBM_FastConfigFree(FastConfigHandle fastConfig) {
  API_BEGIN();
  auto* predictor = reinterpret_cast<SingleRowPredictor*>(fastConfig);
  delete predictor;
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
2244
int LGBM_BoosterPredictForCSR(BoosterHandle handle,
2245
2246
2247
2248
2249
2250
2251
                              const void* indptr,
                              int indptr_type,
                              const int32_t* indices,
                              const void* data,
                              int data_type,
                              int64_t nindptr,
                              int64_t nelem,
2252
                              int64_t num_col,
2253
                              int predict_type,
2254
                              int start_iteration,
2255
                              int num_iteration,
2256
                              const char* parameter,
2257
2258
                              int64_t* out_len,
                              double* out_result) {
2259
  API_BEGIN();
2260
2261
2262
2263
2264
  if (num_col <= 0) {
    Log::Fatal("The number of columns should be greater than zero.");
  } else if (num_col >= INT32_MAX) {
    Log::Fatal("The number of columns should be smaller than INT32_MAX.");
  }
Guolin Ke's avatar
Guolin Ke committed
2265
2266
  auto param = Config::Str2Map(parameter);
  Config config;
Guolin Ke's avatar
Guolin Ke committed
2267
  config.Set(param);
2268
  OMP_SET_NUM_THREADS(config.num_threads);
Guolin Ke's avatar
Guolin Ke committed
2269
  Booster* ref_booster = reinterpret_cast<Booster*>(handle);
2270
  auto get_row_fun = RowFunctionFromCSR<int>(indptr, indptr_type, indices, data, data_type, nindptr, nelem);
Guolin Ke's avatar
Guolin Ke committed
2271
  int nrow = static_cast<int>(nindptr - 1);
2272
  ref_booster->Predict(start_iteration, num_iteration, predict_type, nrow, static_cast<int>(num_col), get_row_fun,
Guolin Ke's avatar
Guolin Ke committed
2273
                       config, out_result, out_len);
2274
  API_END();
Guolin Ke's avatar
Guolin Ke committed
2275
}
2276

2277
2278
2279
2280
2281
2282
2283
2284
2285
2286
/*!
 * \brief Run contribution/leaf prediction over a sparse matrix and return a sparse result.
 *
 * The input may be CSR (rows) or CSC (columns), selected by `matrix_type`.
 * The three out_* arrays are allocated by this call and must be released with
 * LGBM_BoosterFreePredictSparse using the same `indptr_type`/`data_type`.
 *
 * \param handle Booster handle
 * \param indptr Row (CSR) or column (CSC) pointer array, typed per `indptr_type`
 * \param indptr_type C_API_DTYPE_INT32 or C_API_DTYPE_INT64
 * \param indices Indices of the non-zero input values
 * \param data Non-zero input values, typed per `data_type`
 * \param nindptr Length of `indptr`
 * \param nelem Number of non-zero input values
 * \param num_col_or_row Number of columns (CSR) or rows (CSC) of the input
 * \param predict_type One of the C_API_PREDICT_* constants
 * \param matrix_type C_API_MATRIX_TYPE_CSR or C_API_MATRIX_TYPE_CSC
 * \param[out] out_len Receives the sizes of the output arrays
 * \param[out] out_indptr Newly allocated output pointer array (same dtype as input indptr)
 * \param[out] out_indices Newly allocated output index array
 * \param[out] out_data Newly allocated output value array (same dtype as input data)
 * \return 0 on success, -1 on error
 */
int LGBM_BoosterPredictSparseOutput(BoosterHandle handle,
                                    const void* indptr,
                                    int indptr_type,
                                    const int32_t* indices,
                                    const void* data,
                                    int data_type,
                                    int64_t nindptr,
                                    int64_t nelem,
                                    int64_t num_col_or_row,
                                    int predict_type,
                                    int start_iteration,
                                    int num_iteration,
                                    const char* parameter,
                                    int matrix_type,
                                    int64_t* out_len,
                                    void** out_indptr,
                                    int32_t** out_indices,
                                    void** out_data) {
  API_BEGIN();
  Booster* ref_booster = reinterpret_cast<Booster*>(handle);
  auto param = Config::Str2Map(parameter);
  Config config;
  config.Set(param);
  OMP_SET_NUM_THREADS(config.num_threads);
  if (matrix_type == C_API_MATRIX_TYPE_CSR) {
    // CSR path: num_col_or_row is the column count and must fit in int32.
    if (num_col_or_row <= 0) {
      Log::Fatal("The number of columns should be greater than zero.");
    } else if (num_col_or_row >= INT32_MAX) {
      Log::Fatal("The number of columns should be smaller than INT32_MAX.");
    }
    auto get_row_fun = RowFunctionFromCSR<int64_t>(indptr, indptr_type, indices, data, data_type, nindptr, nelem);
    int64_t nrow = nindptr - 1;
    ref_booster->PredictSparseCSR(start_iteration, num_iteration, predict_type, nrow, static_cast<int>(num_col_or_row), get_row_fun,
                                  config, out_len, out_indptr, indptr_type, out_indices, out_data, data_type);
  } else if (matrix_type == C_API_MATRIX_TYPE_CSC) {
    // CSC path: build one CSC_RowIterator per (thread, column) so OpenMP threads
    // can walk columns independently without sharing iterator state.
    int num_threads = OMP_NUM_THREADS();
    int ncol = static_cast<int>(nindptr - 1);
    std::vector<std::vector<CSC_RowIterator>> iterators(num_threads, std::vector<CSC_RowIterator>());
    for (int i = 0; i < num_threads; ++i) {
      for (int j = 0; j < ncol; ++j) {
        iterators[i].emplace_back(indptr, indptr_type, indices, data, data_type, nindptr, nelem, j);
      }
    }
    // Assemble one sparse row by probing every column iterator at row i;
    // values below kZeroThreshold are dropped, NaN is kept (missing marker).
    std::function<std::vector<std::pair<int, double>>(int64_t row_idx)> get_row_fun =
      [&iterators, ncol](int64_t i) {
      std::vector<std::pair<int, double>> one_row;
      one_row.reserve(ncol);
      const int tid = omp_get_thread_num();
      for (int j = 0; j < ncol; ++j) {
        auto val = iterators[tid][j].Get(static_cast<int>(i));
        if (std::fabs(val) > kZeroThreshold || std::isnan(val)) {
          one_row.emplace_back(j, val);
        }
      }
      return one_row;
    };
    ref_booster->PredictSparseCSC(start_iteration, num_iteration, predict_type, num_col_or_row, ncol, get_row_fun, config,
                                  out_len, out_indptr, indptr_type, out_indices, out_data, data_type);
  } else {
    Log::Fatal("Unknown matrix type in LGBM_BoosterPredictSparseOutput");
  }
  API_END();
}

int LGBM_BoosterFreePredictSparse(void* indptr, int32_t* indices, void* data, int indptr_type, int data_type) {
  API_BEGIN();
  if (indptr_type == C_API_DTYPE_INT32) {
2344
    delete[] reinterpret_cast<int32_t*>(indptr);
2345
  } else if (indptr_type == C_API_DTYPE_INT64) {
2346
    delete[] reinterpret_cast<int64_t*>(indptr);
2347
2348
2349
  } else {
    Log::Fatal("Unknown indptr type in LGBM_BoosterFreePredictSparse");
  }
2350
  delete[] indices;
2351
  if (data_type == C_API_DTYPE_FLOAT32) {
2352
    delete[] reinterpret_cast<float*>(data);
2353
  } else if (data_type == C_API_DTYPE_FLOAT64) {
2354
    delete[] reinterpret_cast<double*>(data);
2355
2356
2357
2358
2359
2360
  } else {
    Log::Fatal("Unknown data type in LGBM_BoosterFreePredictSparse");
  }
  API_END();
}

2361
int LGBM_BoosterPredictForCSRSingleRow(BoosterHandle handle,
2362
2363
2364
2365
2366
2367
2368
                                       const void* indptr,
                                       int indptr_type,
                                       const int32_t* indices,
                                       const void* data,
                                       int data_type,
                                       int64_t nindptr,
                                       int64_t nelem,
2369
                                       int64_t num_col,
2370
                                       int predict_type,
2371
                                       int start_iteration,
2372
2373
2374
2375
                                       int num_iteration,
                                       const char* parameter,
                                       int64_t* out_len,
                                       double* out_result) {
2376
  API_BEGIN();
2377
2378
2379
2380
2381
  if (num_col <= 0) {
    Log::Fatal("The number of columns should be greater than zero.");
  } else if (num_col >= INT32_MAX) {
    Log::Fatal("The number of columns should be smaller than INT32_MAX.");
  }
2382
2383
2384
  auto param = Config::Str2Map(parameter);
  Config config;
  config.Set(param);
2385
  OMP_SET_NUM_THREADS(config.num_threads);
2386
  Booster* ref_booster = reinterpret_cast<Booster*>(handle);
2387
  auto get_row_fun = RowFunctionFromCSR<int>(indptr, indptr_type, indices, data, data_type, nindptr, nelem);
2388
  ref_booster->SetSingleRowPredictorInner(start_iteration, num_iteration, predict_type, config);
2389
  ref_booster->PredictSingleRow(predict_type, static_cast<int32_t>(num_col), get_row_fun, config, out_result, out_len);
2390
2391
2392
  API_END();
}

2393
/*!
 * \brief Build a reusable single-row predictor for CSR input (the "fast" path).
 *
 * The returned handle caches the parsed configuration and predictor so repeated
 * single-row calls avoid re-parsing parameters. Release it with LGBM_FastConfigFree.
 *
 * \param handle Booster handle
 * \param predict_type One of the C_API_PREDICT_* constants
 * \param start_iteration First iteration used for prediction
 * \param num_iteration Number of iterations used; <= 0 means all
 * \param data_type C_API_DTYPE_FLOAT32 or C_API_DTYPE_FLOAT64 of the future input rows
 * \param num_col Number of columns; must be in (0, INT32_MAX)
 * \param parameter Additional parameters as "key=value" string
 * \param[out] out_fastConfig Receives the new predictor handle
 * \return 0 on success, -1 on error
 */
int LGBM_BoosterPredictForCSRSingleRowFastInit(BoosterHandle handle,
                                               const int predict_type,
                                               const int start_iteration,
                                               const int num_iteration,
                                               const int data_type,
                                               const int64_t num_col,
                                               const char* parameter,
                                               FastConfigHandle *out_fastConfig) {
  API_BEGIN();
  if (num_col <= 0) {
    Log::Fatal("The number of columns should be greater than zero.");
  } else if (num_col >= INT32_MAX) {
    Log::Fatal("The number of columns should be smaller than INT32_MAX.");
  }

  Booster* ref_booster = reinterpret_cast<Booster*>(handle);

  // NOTE(review): this call passes (start_iteration, num_iteration, predict_type, ...)
  // while LGBM_BoosterPredictForMatSingleRowFastInit passes
  // (predict_type, start_iteration, num_iteration, ...). All three are ints, so both
  // compile, but at most one order matches InitSingleRowPredictor's declaration —
  // verify against the Booster class and fix the mismatched call site.
  std::unique_ptr<SingleRowPredictor> single_row_predictor =
    ref_booster->InitSingleRowPredictor(start_iteration, num_iteration, predict_type, data_type, static_cast<int32_t>(num_col), parameter);

  OMP_SET_NUM_THREADS(single_row_predictor->config.num_threads);

  *out_fastConfig = single_row_predictor.release();
  API_END();
}

int LGBM_BoosterPredictForCSRSingleRowFast(FastConfigHandle fastConfig_handle,
                                           const void* indptr,
2421
                                           const int indptr_type,
2422
2423
                                           const int32_t* indices,
                                           const void* data,
2424
2425
                                           const int64_t nindptr,
                                           const int64_t nelem,
2426
2427
2428
                                           int64_t* out_len,
                                           double* out_result) {
  API_BEGIN();
2429
2430
2431
  SingleRowPredictor *single_row_predictor = reinterpret_cast<SingleRowPredictor*>(fastConfig_handle);
  auto get_row_fun = RowFunctionFromCSR<int>(indptr, indptr_type, indices, data, single_row_predictor->data_type, nindptr, nelem);
  single_row_predictor->Predict(get_row_fun, out_result, out_len);
2432
2433
2434
  API_END();
}

2435

Guolin Ke's avatar
Guolin Ke committed
2436
int LGBM_BoosterPredictForCSC(BoosterHandle handle,
2437
2438
2439
2440
2441
2442
2443
2444
2445
                              const void* col_ptr,
                              int col_ptr_type,
                              const int32_t* indices,
                              const void* data,
                              int data_type,
                              int64_t ncol_ptr,
                              int64_t nelem,
                              int64_t num_row,
                              int predict_type,
2446
                              int start_iteration,
2447
                              int num_iteration,
2448
                              const char* parameter,
2449
2450
                              int64_t* out_len,
                              double* out_result) {
Guolin Ke's avatar
Guolin Ke committed
2451
2452
  API_BEGIN();
  Booster* ref_booster = reinterpret_cast<Booster*>(handle);
Guolin Ke's avatar
Guolin Ke committed
2453
2454
  auto param = Config::Str2Map(parameter);
  Config config;
Guolin Ke's avatar
Guolin Ke committed
2455
  config.Set(param);
2456
  OMP_SET_NUM_THREADS(config.num_threads);
2457
  int num_threads = OMP_NUM_THREADS();
Guolin Ke's avatar
Guolin Ke committed
2458
  int ncol = static_cast<int>(ncol_ptr - 1);
Guolin Ke's avatar
Guolin Ke committed
2459
2460
2461
2462
2463
  std::vector<std::vector<CSC_RowIterator>> iterators(num_threads, std::vector<CSC_RowIterator>());
  for (int i = 0; i < num_threads; ++i) {
    for (int j = 0; j < ncol; ++j) {
      iterators[i].emplace_back(col_ptr, col_ptr_type, indices, data, data_type, ncol_ptr, nelem, j);
    }
Guolin Ke's avatar
Guolin Ke committed
2464
2465
  }
  std::function<std::vector<std::pair<int, double>>(int row_idx)> get_row_fun =
Guolin Ke's avatar
Guolin Ke committed
2466
2467
2468
2469
2470
2471
2472
2473
2474
2475
2476
2477
      [&iterators, ncol](int i) {
        std::vector<std::pair<int, double>> one_row;
        one_row.reserve(ncol);
        const int tid = omp_get_thread_num();
        for (int j = 0; j < ncol; ++j) {
          auto val = iterators[tid][j].Get(i);
          if (std::fabs(val) > kZeroThreshold || std::isnan(val)) {
            one_row.emplace_back(j, val);
          }
        }
        return one_row;
      };
2478
  ref_booster->Predict(start_iteration, num_iteration, predict_type, static_cast<int>(num_row), ncol, get_row_fun, config,
cbecker's avatar
cbecker committed
2479
                       out_result, out_len);
Guolin Ke's avatar
Guolin Ke committed
2480
2481
2482
  API_END();
}

2483
2484
2485
2486
2487
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497
2498
2499
2500
2501
2502
2503
/*!
 * \brief Check that the caller's feature names match the model's, in count and order.
 * \param handle Booster handle
 * \param data_names Feature names of the data about to be predicted
 * \param data_num_features Number of entries in `data_names`
 * \return 0 on success, -1 on error (mismatch raises a fatal error)
 */
int LGBM_BoosterValidateFeatureNames(BoosterHandle handle,
                                     const char** data_names,
                                     int data_num_features) {
  API_BEGIN();
  int model_num_features;
  size_t required_len;
  // First call only queries the count and the buffer size needed per name.
  LGBM_BoosterGetFeatureNames(handle, 0, &model_num_features, 0, &required_len, nullptr);
  if (model_num_features != data_num_features) {
    Log::Fatal("Model was trained on %d features, but got %d input features to predict.", model_num_features, data_num_features);
  }
  std::vector<std::vector<char>> name_storage(model_num_features, std::vector<char>(required_len));
  std::vector<char*> model_names = Vector2Ptr(&name_storage);
  LGBM_BoosterGetFeatureNames(handle, data_num_features, &model_num_features, required_len, &required_len, model_names.data());
  for (int i = 0; i < model_num_features; ++i) {
    if (strcmp(data_names[i], model_names[i]) != 0) {
      Log::Fatal("Expected '%s' at position %d but found '%s'", model_names[i], i, data_names[i]);
    }
  }
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
2504
int LGBM_BoosterPredictForMat(BoosterHandle handle,
2505
2506
2507
2508
2509
2510
                              const void* data,
                              int data_type,
                              int32_t nrow,
                              int32_t ncol,
                              int is_row_major,
                              int predict_type,
2511
                              int start_iteration,
2512
                              int num_iteration,
2513
                              const char* parameter,
2514
2515
                              int64_t* out_len,
                              double* out_result) {
2516
  API_BEGIN();
Guolin Ke's avatar
Guolin Ke committed
2517
2518
  auto param = Config::Str2Map(parameter);
  Config config;
Guolin Ke's avatar
Guolin Ke committed
2519
  config.Set(param);
2520
  OMP_SET_NUM_THREADS(config.num_threads);
Guolin Ke's avatar
Guolin Ke committed
2521
  Booster* ref_booster = reinterpret_cast<Booster*>(handle);
2522
  auto get_row_fun = RowPairFunctionFromDenseMatric(data, nrow, ncol, data_type, is_row_major);
2523
  ref_booster->Predict(start_iteration, num_iteration, predict_type, nrow, ncol, get_row_fun,
Guolin Ke's avatar
Guolin Ke committed
2524
                       config, out_result, out_len);
2525
  API_END();
Guolin Ke's avatar
Guolin Ke committed
2526
}
2527

2528
/*!
 * \brief Predict one dense row, reusing the Booster's cached single-row predictor.
 * \param handle Booster handle
 * \param data One row of dense values, typed per `data_type`
 * \param data_type C_API_DTYPE_FLOAT32 or C_API_DTYPE_FLOAT64
 * \param ncol Number of columns
 * \param is_row_major Non-zero when `data` is row-major
 * \param predict_type One of the C_API_PREDICT_* constants
 * \param[out] out_len Receives the number of results written
 * \param[out] out_result Caller buffer for the prediction values
 * \return 0 on success, -1 on error
 */
int LGBM_BoosterPredictForMatSingleRow(BoosterHandle handle,
                                       const void* data,
                                       int data_type,
                                       int32_t ncol,
                                       int is_row_major,
                                       int predict_type,
                                       int start_iteration,
                                       int num_iteration,
                                       const char* parameter,
                                       int64_t* out_len,
                                       double* out_result) {
  API_BEGIN();
  Config config;
  config.Set(Config::Str2Map(parameter));
  OMP_SET_NUM_THREADS(config.num_threads);
  auto* booster = reinterpret_cast<Booster*>(handle);
  auto row_fn = RowPairFunctionFromDenseMatric(data, 1, ncol, data_type, is_row_major);
  // Lazily (re)build the cached single-row predictor, then run it on this row.
  booster->SetSingleRowPredictorInner(start_iteration, num_iteration, predict_type, config);
  booster->PredictSingleRow(predict_type, ncol, row_fn, config, out_result, out_len);
  API_END();
}

2551
/*!
 * \brief Build a reusable single-row predictor for dense input (the "fast" path).
 *
 * The returned handle caches the parsed configuration and predictor so repeated
 * single-row calls avoid re-parsing parameters. Release it with LGBM_FastConfigFree.
 *
 * \param handle Booster handle
 * \param predict_type One of the C_API_PREDICT_* constants
 * \param start_iteration First iteration used for prediction
 * \param num_iteration Number of iterations used; <= 0 means all
 * \param data_type C_API_DTYPE_FLOAT32 or C_API_DTYPE_FLOAT64 of the future input rows
 * \param ncol Number of columns of the future input rows
 * \param parameter Additional parameters as "key=value" string
 * \param[out] out_fastConfig Receives the new predictor handle
 * \return 0 on success, -1 on error
 */
int LGBM_BoosterPredictForMatSingleRowFastInit(BoosterHandle handle,
                                               const int predict_type,
                                               const int start_iteration,
                                               const int num_iteration,
                                               const int data_type,
                                               const int32_t ncol,
                                               const char* parameter,
                                               FastConfigHandle *out_fastConfig) {
  API_BEGIN();
  Booster* ref_booster = reinterpret_cast<Booster*>(handle);

  // NOTE(review): this call passes (predict_type, start_iteration, num_iteration, ...)
  // while LGBM_BoosterPredictForCSRSingleRowFastInit passes
  // (start_iteration, num_iteration, predict_type, ...). All three are ints, so both
  // compile, but at most one order matches InitSingleRowPredictor's declaration —
  // verify against the Booster class and fix the mismatched call site.
  std::unique_ptr<SingleRowPredictor> single_row_predictor =
    ref_booster->InitSingleRowPredictor(predict_type, start_iteration, num_iteration, data_type, ncol, parameter);

  OMP_SET_NUM_THREADS(single_row_predictor->config.num_threads);

  *out_fastConfig = single_row_predictor.release();
  API_END();
}

int LGBM_BoosterPredictForMatSingleRowFast(FastConfigHandle fastConfig_handle,
                                           const void* data,
                                           int64_t* out_len,
                                           double* out_result) {
  API_BEGIN();
  auto* predictor = reinterpret_cast<SingleRowPredictor*>(fastConfig_handle);
  // A single row is trivially row-major; column count and element type come
  // from the predictor created by the matching *FastInit call.
  auto single_row_fn = RowPairFunctionFromDenseMatric(data, 1, predictor->num_cols, predictor->data_type, 1);
  predictor->Predict(single_row_fn, out_result, out_len);
  API_END();
}

2583

2584
2585
2586
2587
2588
2589
int LGBM_BoosterPredictForMats(BoosterHandle handle,
                               const void** data,
                               int data_type,
                               int32_t nrow,
                               int32_t ncol,
                               int predict_type,
                               int start_iteration,
                               int num_iteration,
                               const char* parameter,
                               int64_t* out_len,
                               double* out_result) {
  API_BEGIN();
  // Parse and apply the prediction parameters.
  Config config;
  config.Set(Config::Str2Map(parameter));
  OMP_SET_NUM_THREADS(config.num_threads);
  Booster* booster = reinterpret_cast<Booster*>(handle);
  // Each entry of `data` points at one dense row of `ncol` values.
  auto rows_fn = RowPairFunctionFromDenseRows(data, ncol, data_type);
  booster->Predict(start_iteration, num_iteration, predict_type, nrow, ncol, rows_fn, config, out_result, out_len);
  API_END();
}

2606
2607
2608
2609
2610
2611
2612
2613
2614
2615
2616
2617
2618
2619
2620
2621
2622
2623
2624
2625
2626
2627
2628
2629
2630
2631
2632
2633
2634
2635
2636
2637
2638
2639
2640
2641
2642
2643
2644
2645
2646
2647
2648
2649
2650
2651
2652
2653
2654
2655
2656
int LGBM_BoosterPredictForArrow(BoosterHandle handle,
                                int64_t n_chunks,
                                const ArrowArray* chunks,
                                const ArrowSchema* schema,
                                int predict_type,
                                int start_iteration,
                                int num_iteration,
                                const char* parameter,
                                int64_t* out_len,
                                double* out_result) {
  API_BEGIN();

  // Parse and apply the prediction parameters.
  auto params = Config::Str2Map(parameter);
  Config config;
  config.Set(params);
  OMP_SET_NUM_THREADS(config.num_threads);

  // Materialize one double-valued iterator per column of the chunked table.
  ArrowTable table(n_chunks, chunks, schema);
  const auto num_columns = table.get_num_columns();
  std::vector<ArrowChunkedArray::Iterator<double>> column_iters;
  column_iters.reserve(num_columns);
  for (int64_t j = 0; j < num_columns; ++j) {
    column_iters.emplace_back(table.get_column(j).begin<double>());
  }

  // A row is assembled by reading index `row_idx` from every column iterator.
  auto row_fn = [num_columns, &column_iters](int row_idx) {
    std::vector<std::pair<int, double>> row;
    row.reserve(num_columns);
    for (int64_t j = 0; j < num_columns; ++j) {
      row.emplace_back(static_cast<int>(j), column_iters[j][row_idx]);
    }
    return row;
  };

  // Run prediction over the whole table.
  Booster* booster = reinterpret_cast<Booster*>(handle);
  booster->Predict(start_iteration,
                   num_iteration,
                   predict_type,
                   static_cast<int>(table.get_num_rows()),
                   static_cast<int>(num_columns),
                   row_fn,
                   config,
                   out_result,
                   out_len);
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
2657
int LGBM_BoosterSaveModel(BoosterHandle handle,
2658
                          int start_iteration,
2659
                          int num_iteration,
2660
                          int feature_importance_type,
2661
                          const char* filename) {
2662
  API_BEGIN();
Guolin Ke's avatar
Guolin Ke committed
2663
  Booster* ref_booster = reinterpret_cast<Booster*>(handle);
2664
2665
  ref_booster->SaveModelToFile(start_iteration, num_iteration,
                               feature_importance_type, filename);
wxchan's avatar
wxchan committed
2666
2667
2668
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
2669
int LGBM_BoosterSaveModelToString(BoosterHandle handle,
2670
                                  int start_iteration,
2671
                                  int num_iteration,
2672
                                  int feature_importance_type,
2673
                                  int64_t buffer_len,
2674
                                  int64_t* out_len,
2675
                                  char* out_str) {
2676
2677
  API_BEGIN();
  Booster* ref_booster = reinterpret_cast<Booster*>(handle);
2678
2679
  std::string model = ref_booster->SaveModelToString(
      start_iteration, num_iteration, feature_importance_type);
2680
  *out_len = static_cast<int64_t>(model.size()) + 1;
2681
  if (*out_len <= buffer_len) {
Guolin Ke's avatar
Guolin Ke committed
2682
    std::memcpy(out_str, model.c_str(), *out_len);
2683
2684
2685
2686
  }
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
2687
int LGBM_BoosterDumpModel(BoosterHandle handle,
2688
                          int start_iteration,
2689
                          int num_iteration,
2690
                          int feature_importance_type,
2691
2692
                          int64_t buffer_len,
                          int64_t* out_len,
2693
                          char* out_str) {
wxchan's avatar
wxchan committed
2694
2695
  API_BEGIN();
  Booster* ref_booster = reinterpret_cast<Booster*>(handle);
2696
2697
  std::string model = ref_booster->DumpModel(start_iteration, num_iteration,
                                             feature_importance_type);
2698
  *out_len = static_cast<int64_t>(model.size()) + 1;
wxchan's avatar
wxchan committed
2699
  if (*out_len <= buffer_len) {
Guolin Ke's avatar
Guolin Ke committed
2700
    std::memcpy(out_str, model.c_str(), *out_len);
wxchan's avatar
wxchan committed
2701
  }
2702
  API_END();
Guolin Ke's avatar
Guolin Ke committed
2703
}
2704

Guolin Ke's avatar
Guolin Ke committed
2705
int LGBM_BoosterGetLeafValue(BoosterHandle handle,
                             int tree_idx,
                             int leaf_idx,
                             double* out_val) {
  API_BEGIN();
  Booster* booster = reinterpret_cast<Booster*>(handle);
  // Read the output value of leaf `leaf_idx` in tree `tree_idx`.
  const auto value = booster->GetLeafValue(tree_idx, leaf_idx);
  *out_val = static_cast<double>(value);
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
2715
int LGBM_BoosterSetLeafValue(BoosterHandle handle,
                             int tree_idx,
                             int leaf_idx,
                             double val) {
  API_BEGIN();
  // Overwrite the output value of leaf `leaf_idx` in tree `tree_idx`.
  reinterpret_cast<Booster*>(handle)->SetLeafValue(tree_idx, leaf_idx, val);
  API_END();
}

2725
2726
2727
2728
2729
2730
2731
2732
2733
2734
2735
2736
2737
int LGBM_BoosterFeatureImportance(BoosterHandle handle,
                                  int num_iteration,
                                  int importance_type,
                                  double* out_results) {
  API_BEGIN();
  Booster* booster = reinterpret_cast<Booster*>(handle);
  const std::vector<double> importances = booster->FeatureImportance(num_iteration, importance_type);
  // Copy one importance score per feature into the caller-provided buffer,
  // which is assumed to hold at least `importances.size()` doubles.
  size_t pos = 0;
  for (const double score : importances) {
    out_results[pos++] = score;
  }
  API_END();
}

2738
2739
2740
2741
2742
2743
2744
2745
2746
2747
2748
2749
2750
2751
2752
2753
2754
2755
int LGBM_BoosterGetUpperBoundValue(BoosterHandle handle,
                                   double* out_results) {
  API_BEGIN();
  // Report the model's upper bound value.
  *out_results = reinterpret_cast<Booster*>(handle)->UpperBoundValue();
  API_END();
}

int LGBM_BoosterGetLowerBoundValue(BoosterHandle handle,
                                   double* out_results) {
  API_BEGIN();
  // Report the model's lower bound value.
  *out_results = reinterpret_cast<Booster*>(handle)->LowerBoundValue();
  API_END();
}

2756
2757
2758
2759
2760
int LGBM_NetworkInit(const char* machines,
                     int local_listen_port,
                     int listen_time_out,
                     int num_machines) {
  API_BEGIN();
Guolin Ke's avatar
Guolin Ke committed
2761
  Config config;
2762
  config.machines = RemoveQuotationSymbol(std::string(machines));
2763
2764
2765
2766
2767
2768
2769
2770
2771
2772
2773
2774
2775
2776
2777
  config.local_listen_port = local_listen_port;
  config.num_machines = num_machines;
  config.time_out = listen_time_out;
  if (num_machines > 1) {
    Network::Init(config);
  }
  API_END();
}

int LGBM_NetworkFree() {
  API_BEGIN();
  // Tear down the network state established by LGBM_NetworkInit /
  // LGBM_NetworkInitWithFunctions.
  Network::Dispose();
  API_END();
}

2778
2779
2780
int LGBM_NetworkInitWithFunctions(int num_machines, int rank,
                                  void* reduce_scatter_ext_fun,
                                  void* allgather_ext_fun) {
ww's avatar
ww committed
2781
  API_BEGIN();
Guolin Ke's avatar
Guolin Ke committed
2782
  if (num_machines > 1) {
2783
    Network::Init(num_machines, rank, (ReduceScatterFunction)reduce_scatter_ext_fun, (AllgatherFunction)allgather_ext_fun);
ww's avatar
ww committed
2784
2785
2786
  }
  API_END();
}
Guolin Ke's avatar
Guolin Ke committed
2787

2788
2789
2790
2791
2792
2793
2794
2795
2796
2797
2798
2799
2800
2801
2802
2803
2804
int LGBM_SetMaxThreads(int num_threads) {
  API_BEGIN();
  // Non-positive requests mean "no explicit cap", stored as -1.
  LGBM_MAX_NUM_THREADS = (num_threads <= 0) ? -1 : num_threads;
  API_END();
}

int LGBM_GetMaxThreads(int* out) {
  API_BEGIN();
  // Report the current thread cap; -1 means no cap was set
  // (see LGBM_SetMaxThreads).
  *out = LGBM_MAX_NUM_THREADS;
  API_END();
}


Guolin Ke's avatar
Guolin Ke committed
2805
// ---- start of some help functions
2806

2807
2808
2809
2810
2811
2812
2813
2814
2815
2816
2817
2818
2819
2820
2821
2822
2823
2824
2825
2826
2827
2828
2829
2830
2831

// Returns a closure that extracts row `row_idx` of a dense num_row x num_col
// matrix of element type T as a vector<double>.
template<typename T>
std::function<std::vector<double>(int row_idx)>
RowFunctionFromDenseMatric_helper(const void* data, int num_row, int num_col, int is_row_major) {
  const T* values = reinterpret_cast<const T*>(data);
  if (is_row_major) {
    // Row-major: the row's elements are contiguous, starting at
    // row_idx * num_col.
    return [=](int row_idx) {
      std::vector<double> row(num_col);
      const T* first = values + static_cast<size_t>(num_col) * row_idx;
      for (int j = 0; j < num_col; ++j) {
        row[j] = static_cast<double>(first[j]);
      }
      return row;
    };
  }
  // Column-major: consecutive elements of a row are num_row apart.
  return [=](int row_idx) {
    std::vector<double> row(num_col);
    for (int j = 0; j < num_col; ++j) {
      row[j] = static_cast<double>(values[static_cast<size_t>(num_row) * j + row_idx]);
    }
    return row;
  };
}

2832
2833
std::function<std::vector<double>(int row_idx)>
RowFunctionFromDenseMatric(const void* data, int num_row, int num_col, int data_type, int is_row_major) {
Guolin Ke's avatar
Guolin Ke committed
2834
  if (data_type == C_API_DTYPE_FLOAT32) {
2835
    return RowFunctionFromDenseMatric_helper<float>(data, num_row, num_col, is_row_major);
Guolin Ke's avatar
Guolin Ke committed
2836
  } else if (data_type == C_API_DTYPE_FLOAT64) {
2837
    return RowFunctionFromDenseMatric_helper<double>(data, num_row, num_col, is_row_major);
2838
  }
2839
  Log::Fatal("Unknown data type in RowFunctionFromDenseMatric");
2840
  return nullptr;
2841
2842
2843
2844
}

std::function<std::vector<std::pair<int, double>>(int row_idx)>
RowPairFunctionFromDenseMatric(const void* data, int num_row, int num_col, int data_type, int is_row_major) {
Guolin Ke's avatar
Guolin Ke committed
2845
2846
  auto inner_function = RowFunctionFromDenseMatric(data, num_row, num_col, data_type, is_row_major);
  if (inner_function != nullptr) {
2847
    return [inner_function] (int row_idx) {
Guolin Ke's avatar
Guolin Ke committed
2848
2849
      auto raw_values = inner_function(row_idx);
      std::vector<std::pair<int, double>> ret;
Guolin Ke's avatar
Guolin Ke committed
2850
      ret.reserve(raw_values.size());
Guolin Ke's avatar
Guolin Ke committed
2851
      for (int i = 0; i < static_cast<int>(raw_values.size()); ++i) {
Guolin Ke's avatar
Guolin Ke committed
2852
        if (std::fabs(raw_values[i]) > kZeroThreshold || std::isnan(raw_values[i])) {
Guolin Ke's avatar
Guolin Ke committed
2853
          ret.emplace_back(i, raw_values[i]);
2854
        }
Guolin Ke's avatar
Guolin Ke committed
2855
2856
2857
      }
      return ret;
    };
2858
  }
Guolin Ke's avatar
Guolin Ke committed
2859
  return nullptr;
2860
2861
}

2862
2863
2864
2865
2866
2867
2868
// data is array of pointers to individual rows
std::function<std::vector<std::pair<int, double>>(int row_idx)>
RowPairFunctionFromDenseRows(const void** data, int num_col, int data_type) {
  return [=](int row_idx) {
    auto inner_function = RowFunctionFromDenseMatric(data[row_idx], 1, num_col, data_type, /* is_row_major */ true);
    auto raw_values = inner_function(0);
    std::vector<std::pair<int, double>> ret;
Guolin Ke's avatar
Guolin Ke committed
2869
    ret.reserve(raw_values.size());
2870
2871
2872
2873
2874
2875
2876
2877
2878
    for (int i = 0; i < static_cast<int>(raw_values.size()); ++i) {
      if (std::fabs(raw_values[i]) > kZeroThreshold || std::isnan(raw_values[i])) {
        ret.emplace_back(i, raw_values[i]);
      }
    }
    return ret;
  };
}

2879
2880
2881
2882
2883
2884
2885
2886
2887
2888
2889
2890
2891
2892
2893
2894
2895
2896
2897
// Returns a closure yielding row `idx` of a CSR matrix as sparse
// (column, value) pairs. T is the row-index type, T1 the value type,
// T2 the index-pointer type.
template<typename T, typename T1, typename T2>
std::function<std::vector<std::pair<int, double>>(T idx)>
RowFunctionFromCSR_helper(const void* indptr, const int32_t* indices, const void* data) {
  const T1* values = reinterpret_cast<const T1*>(data);
  const T2* row_offsets = reinterpret_cast<const T2*>(indptr);
  return [=](T idx) {
    // Non-zeros of row `idx` live in [row_offsets[idx], row_offsets[idx + 1]).
    const int64_t begin = row_offsets[idx];
    const int64_t finish = row_offsets[idx + 1];
    std::vector<std::pair<int, double>> row;
    if (finish > begin) {
      row.reserve(finish - begin);
    }
    for (int64_t k = begin; k < finish; ++k) {
      row.emplace_back(indices[k], values[k]);
    }
    return row;
  };
}

2898
2899
template<typename T>
std::function<std::vector<std::pair<int, double>>(T idx)>
2900
RowFunctionFromCSR(const void* indptr, int indptr_type, const int32_t* indices, const void* data, int data_type, int64_t , int64_t ) {
Guolin Ke's avatar
Guolin Ke committed
2901
2902
  if (data_type == C_API_DTYPE_FLOAT32) {
    if (indptr_type == C_API_DTYPE_INT32) {
2903
     return RowFunctionFromCSR_helper<T, float, int32_t>(indptr, indices, data);
Guolin Ke's avatar
Guolin Ke committed
2904
    } else if (indptr_type == C_API_DTYPE_INT64) {
2905
     return RowFunctionFromCSR_helper<T, float, int64_t>(indptr, indices, data);
2906
    }
Guolin Ke's avatar
Guolin Ke committed
2907
2908
  } else if (data_type == C_API_DTYPE_FLOAT64) {
    if (indptr_type == C_API_DTYPE_INT32) {
2909
     return RowFunctionFromCSR_helper<T, double, int32_t>(indptr, indices, data);
Guolin Ke's avatar
Guolin Ke committed
2910
    } else if (indptr_type == C_API_DTYPE_INT64) {
2911
     return RowFunctionFromCSR_helper<T, double, int64_t>(indptr, indices, data);
Guolin Ke's avatar
Guolin Ke committed
2912
2913
    }
  }
2914
  Log::Fatal("Unknown data type in RowFunctionFromCSR");
2915
  return nullptr;
2916
2917
}

2918
2919
2920
2921
2922
2923
2924
2925
2926
2927
2928
2929
2930
2931
2932
2933
2934
2935
2936


// Returns a closure that yields the offset-th non-zero of column `col_idx`
// of a CSC matrix as (row index, value), or (-1, 0.0) once the column is
// exhausted. T1 is the value type, T2 the column-pointer type.
template <typename T1, typename T2>
std::function<std::pair<int, double>(int idx)> IterateFunctionFromCSC_helper(const void* col_ptr, const int32_t* indices, const void* data, int col_idx) {
  const T1* values = reinterpret_cast<const T1*>(data);
  const T2* col_offsets = reinterpret_cast<const T2*>(col_ptr);
  const int64_t begin = col_offsets[col_idx];
  const int64_t finish = col_offsets[col_idx + 1];
  return [=](int offset) {
    const int64_t pos = static_cast<int64_t>(begin + offset);
    if (pos >= finish) {
      return std::make_pair(-1, 0.0);
    }
    return std::make_pair(static_cast<int>(indices[pos]), static_cast<double>(values[pos]));
  };
}

Guolin Ke's avatar
Guolin Ke committed
2937
std::function<std::pair<int, double>(int idx)>
2938
IterateFunctionFromCSC(const void* col_ptr, int col_ptr_type, const int32_t* indices, const void* data, int data_type, int64_t ncol_ptr, int64_t , int col_idx) {
Guolin Ke's avatar
Guolin Ke committed
2939
  CHECK(col_idx < ncol_ptr && col_idx >= 0);
Guolin Ke's avatar
Guolin Ke committed
2940
2941
  if (data_type == C_API_DTYPE_FLOAT32) {
    if (col_ptr_type == C_API_DTYPE_INT32) {
2942
      return IterateFunctionFromCSC_helper<float, int32_t>(col_ptr, indices, data, col_idx);
Guolin Ke's avatar
Guolin Ke committed
2943
    } else if (col_ptr_type == C_API_DTYPE_INT64) {
2944
      return IterateFunctionFromCSC_helper<float, int64_t>(col_ptr, indices, data, col_idx);
Guolin Ke's avatar
Guolin Ke committed
2945
    }
Guolin Ke's avatar
Guolin Ke committed
2946
2947
  } else if (data_type == C_API_DTYPE_FLOAT64) {
    if (col_ptr_type == C_API_DTYPE_INT32) {
2948
      return IterateFunctionFromCSC_helper<double, int32_t>(col_ptr, indices, data, col_idx);
Guolin Ke's avatar
Guolin Ke committed
2949
    } else if (col_ptr_type == C_API_DTYPE_INT64) {
2950
      return IterateFunctionFromCSC_helper<double, int64_t>(col_ptr, indices, data, col_idx);
Guolin Ke's avatar
Guolin Ke committed
2951
2952
    }
  }
2953
  Log::Fatal("Unknown data type in CSC matrix");
2954
  return nullptr;
2955
2956
}

Guolin Ke's avatar
Guolin Ke committed
2957
// Binds an iterator over the non-zeros of column `col_idx` of a CSC matrix;
// Get() and NextNonZero() pull (row, value) pairs through this closure.
CSC_RowIterator::CSC_RowIterator(const void* col_ptr, int col_ptr_type, const int32_t* indices,
                                 const void* data, int data_type, int64_t ncol_ptr, int64_t nelem, int col_idx) {
  iter_fun_ = IterateFunctionFromCSC(col_ptr, col_ptr_type, indices, data, data_type, ncol_ptr, nelem, col_idx);
}

// Returns the value stored at row `idx` of the bound column, or 0.0 when the
// row has no stored entry. The cursor only moves forward through the
// column's non-zeros, so rows earlier than the current cursor position read
// as zero — callers are expected to query with non-decreasing `idx`.
double CSC_RowIterator::Get(int idx) {
  // Advance the cached (cur_idx_, cur_val_) cursor until it reaches or
  // passes row `idx`, or the column runs out of non-zeros.
  while (idx > cur_idx_ && !is_end_) {
    auto ret = iter_fun_(nonzero_idx_);
    if (ret.first < 0) {
      // Sentinel from the iterator: column exhausted.
      is_end_ = true;
      break;
    }
    cur_idx_ = ret.first;
    cur_val_ = ret.second;
    ++nonzero_idx_;
  }
  if (idx == cur_idx_) {
    return cur_val_;
  }
  // No stored entry for this row: implicit zero. Use a double literal
  // (the original returned the float literal 0.0f from a double function).
  return 0.0;
}

// Returns the next stored (row, value) pair of the column, or (-1, 0.0)
// once the column is exhausted; subsequent calls keep returning the
// sentinel.
std::pair<int, double> CSC_RowIterator::NextNonZero() {
  if (is_end_) {
    return std::make_pair(-1, 0.0);
  }
  auto result = iter_fun_(nonzero_idx_++);
  if (result.first < 0) {
    // Sentinel from the iterator: remember that the column is done.
    is_end_ = true;
  }
  return result;
}