c_api.cpp 115 KB
Newer Older
1
2
3
4
/*!
 * Copyright (c) 2016 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for license information.
 */
Guolin Ke's avatar
Guolin Ke committed
5
#include <LightGBM/c_api.h>
Guolin Ke's avatar
Guolin Ke committed
6

7
#include <LightGBM/arrow.h>
Guolin Ke's avatar
Guolin Ke committed
8
9
#include <LightGBM/boosting.h>
#include <LightGBM/config.h>
10
11
12
#include <LightGBM/dataset.h>
#include <LightGBM/dataset_loader.h>
#include <LightGBM/metric.h>
13
#include <LightGBM/network.h>
14
15
#include <LightGBM/objective_function.h>
#include <LightGBM/prediction_early_stop.h>
16
#include <LightGBM/utils/byte_buffer.h>
17
18
19
20
21
#include <LightGBM/utils/common.h>
#include <LightGBM/utils/log.h>
#include <LightGBM/utils/openmp_wrapper.h>
#include <LightGBM/utils/random.h>
#include <LightGBM/utils/threading.h>
Guolin Ke's avatar
Guolin Ke committed
22

23
#include <algorithm>
24
#include <cstdint>
25
#include <cstdio>
26
27
28
29
#include <functional>
#include <memory>
#include <mutex>
#include <stdexcept>
30
31
32
#include <string>
#include <unordered_map>
#include <utility>
33
34
#include <vector>

35
#include "application/predictor.hpp"
36
37
#include <LightGBM/utils/yamc/alternate_shared_mutex.hpp>
#include <LightGBM/utils/yamc/yamc_shared_lock.hpp>
Guolin Ke's avatar
Guolin Ke committed
38

Guolin Ke's avatar
Guolin Ke committed
39
40
namespace LightGBM {

Guolin Ke's avatar
Guolin Ke committed
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
// Record a caught std::exception in the per-thread last-error slot and
// return the C API failure code (-1).
inline int LGBM_APIHandleException(const std::exception& err) {
  LGBM_SetLastError(err.what());
  return -1;
}

// Overload for code paths that throw a plain std::string as the error payload.
inline int LGBM_APIHandleException(const std::string& err) {
  LGBM_SetLastError(err.c_str());
  return -1;
}

// Bracket every exported C API function body: API_BEGIN opens a try block and
// API_END catches std::exception, std::string, and anything else, converting
// each into the C error convention (-1 after recording the message via
// LGBM_APIHandleException; 0 on success).
#define API_BEGIN() try {
#define API_END() } \
catch(std::exception& ex) { return LGBM_APIHandleException(ex); } \
catch(std::string& ex) { return LGBM_APIHandleException(ex); } \
catch(...) { return LGBM_APIHandleException("unknown exception"); } \
return 0;

57
58
59
60
61
62
// Take an exclusive (writer) lock named `lock` on `mtx` for the rest of the scope.
#define UNIQUE_LOCK(mtx) \
std::unique_lock<yamc::alternate::shared_mutex> lock(mtx);

// Take a shared (reader) lock named `lock` on `mtx` for the rest of the scope.
// NOTE: yamc::shared_lock is constructed from a pointer, hence the `&`.
#define SHARED_LOCK(mtx) \
yamc::shared_lock<yamc::alternate::shared_mutex> lock(&mtx);

63
64
65
// Number of distinct C_API_PREDICT_* prediction types; used to size
// per-predict-type caches of single-row predictors.
const int PREDICTOR_TYPES = 4;

// Single row predictor to abstract away caching logic
66
class SingleRowPredictorInner {
67
68
69
70
 public:
  PredictFunction predict_function;
  int64_t num_pred_in_one_row;

71
  SingleRowPredictorInner(int predict_type, Boosting* boosting, const Config& config, int start_iter, int num_iter) {
72
73
74
75
76
77
78
79
80
81
82
83
84
    bool is_predict_leaf = false;
    bool is_raw_score = false;
    bool predict_contrib = false;
    if (predict_type == C_API_PREDICT_LEAF_INDEX) {
      is_predict_leaf = true;
    } else if (predict_type == C_API_PREDICT_RAW_SCORE) {
      is_raw_score = true;
    } else if (predict_type == C_API_PREDICT_CONTRIB) {
      predict_contrib = true;
    }
    early_stop_ = config.pred_early_stop;
    early_stop_freq_ = config.pred_early_stop_freq;
    early_stop_margin_ = config.pred_early_stop_margin;
85
86
    iter_ = num_iter;
    predictor_.reset(new Predictor(boosting, start_iter, iter_, is_raw_score, is_predict_leaf, predict_contrib,
87
                                   early_stop_, early_stop_freq_, early_stop_margin_));
88
    num_pred_in_one_row = boosting->NumPredictOneRow(start_iter, iter_, is_predict_leaf, predict_contrib);
89
    predict_function = predictor_->GetPredictFunction();
Guolin Ke's avatar
Guolin Ke committed
90
    num_total_model_ = boosting->NumberOfTotalModel();
91
  }
92

93
  ~SingleRowPredictorInner() {}
94

Guolin Ke's avatar
Guolin Ke committed
95
  bool IsPredictorEqual(const Config& config, int iter, Boosting* boosting) {
96
97
98
99
100
    return early_stop_ == config.pred_early_stop &&
      early_stop_freq_ == config.pred_early_stop_freq &&
      early_stop_margin_ == config.pred_early_stop_margin &&
      iter_ == iter &&
      num_total_model_ == boosting->NumberOfTotalModel();
101
  }
Guolin Ke's avatar
Guolin Ke committed
102

103
104
105
106
107
108
109
110
111
 private:
  std::unique_ptr<Predictor> predictor_;
  bool early_stop_;
  int early_stop_freq_;
  double early_stop_margin_;
  int iter_;
  int num_total_model_;
};

112
113
/*!
 * \brief Object to store resources meant for single-row Fast Predict methods.
 *
 * For legacy reasons this is called `FastConfig` in the public C API.
 *
 * Meant to be used by the *Fast* predict methods only.
 * It stores the configuration and prediction resources for reuse across predictions.
 */
struct SingleRowPredictor {
 public:
  // Builds the inner predictor from `parameters` and validates the declared
  // column count against the model, unless shape checking is disabled.
  // NOTE: member initialization follows declaration order, so `config` is
  // ready before `single_row_predictor_inner` consumes it.
  SingleRowPredictor(yamc::alternate::shared_mutex *booster_mutex,
             const char *parameters,
             const int data_type,
             const int32_t num_cols,
             int predict_type,
             Boosting *boosting,
             int start_iter,
             int num_iter) : config(Config::Str2Map(parameters)), data_type(data_type), num_cols(num_cols), single_row_predictor_inner(predict_type, boosting, config, start_iter, num_iter), booster_mutex(booster_mutex) {
    const bool shape_mismatch = num_cols != boosting->MaxFeatureIdx() + 1;
    if (!config.predict_disable_shape_check && shape_mismatch) {
      Log::Fatal("The number of features in data (%d) is not the same as it was in training data (%d).\n"\
                 "You can set ``predict_disable_shape_check=true`` to discard this error, but please be aware what you are doing.", num_cols, boosting->MaxFeatureIdx() + 1);
    }
  }

  // Predicts one row. Serializes concurrent callers on this object and takes a
  // shared lock on the booster so the model cannot mutate mid-prediction.
  void Predict(std::function<std::vector<std::pair<int, double>>(int row_idx)> get_row_fun,
               double* out_result, int64_t* out_len) const {
    UNIQUE_LOCK(single_row_predictor_mutex)
    yamc::shared_lock<yamc::alternate::shared_mutex> booster_shared_lock(booster_mutex);

    auto row_features = get_row_fun(0);
    single_row_predictor_inner.predict_function(row_features, out_result);

    *out_len = single_row_predictor_inner.num_pred_in_one_row;
  }

 public:
  Config config;
  const int data_type;
  const int32_t num_cols;

 private:
  SingleRowPredictorInner single_row_predictor_inner;

  // Prevents the booster from being modified while a prediction that relies
  // on it is in flight (held shared during Predict).
  yamc::alternate::shared_mutex *booster_mutex;

  // Guards the shared per-predictor resources when several threads predict
  // through the same SingleRowPredictor. The recommended approach is still
  // one SingleRowPredictor per thread, which avoids contention here.
  mutable yamc::alternate::shared_mutex single_row_predictor_mutex;
};

Guolin Ke's avatar
Guolin Ke committed
166
class Booster {
Nikita Titov's avatar
Nikita Titov committed
167
 public:
Guolin Ke's avatar
Guolin Ke committed
168
  // Construct a booster by loading a saved model from `filename`.
  // "gbdt" is the boosting type passed to the factory — presumably the
  // loaded file determines the effective configuration; TODO confirm.
  explicit Booster(const char* filename) {
    boosting_.reset(Boosting::CreateBoosting("gbdt", filename));
  }

Guolin Ke's avatar
Guolin Ke committed
172
  Booster(const Dataset* train_data,
173
          const char* parameters) {
Guolin Ke's avatar
Guolin Ke committed
174
    auto param = Config::Str2Map(parameters);
wxchan's avatar
wxchan committed
175
    config_.Set(param);
176
    OMP_SET_NUM_THREADS(config_.num_threads);
Guolin Ke's avatar
Guolin Ke committed
177
    // create boosting
Guolin Ke's avatar
Guolin Ke committed
178
    if (config_.input_model.size() > 0) {
179
180
      Log::Warning("Continued train from model is not supported for c_api,\n"
                   "please use continued train with input score");
Guolin Ke's avatar
Guolin Ke committed
181
    }
Guolin Ke's avatar
Guolin Ke committed
182

Guolin Ke's avatar
Guolin Ke committed
183
    boosting_.reset(Boosting::CreateBoosting(config_.boosting, nullptr));
Guolin Ke's avatar
Guolin Ke committed
184

185
186
    train_data_ = train_data;
    CreateObjectiveAndMetrics();
Guolin Ke's avatar
Guolin Ke committed
187
    // initialize the boosting
Guolin Ke's avatar
Guolin Ke committed
188
    if (config_.tree_learner == std::string("feature")) {
189
      Log::Fatal("Do not support feature parallel in c api");
190
    }
Guolin Ke's avatar
Guolin Ke committed
191
    if (Network::num_machines() == 1 && config_.tree_learner != std::string("serial")) {
192
      Log::Warning("Only find one worker, will switch to serial tree learner");
Guolin Ke's avatar
Guolin Ke committed
193
      config_.tree_learner = "serial";
194
    }
Guolin Ke's avatar
Guolin Ke committed
195
    boosting_->Init(&config_, train_data_, objective_fun_.get(),
196
                    Common::ConstPtrInVectorWrapper<Metric>(train_metric_));
wxchan's avatar
wxchan committed
197
198
199
  }

  // Merge the trees of `other` into this booster's model.
  // Exclusive lock: merging mutates the underlying boosting object.
  void MergeFrom(const Booster* other) {
    UNIQUE_LOCK(mutex_)
    boosting_->MergeFrom(other->boosting_.get());
  }

  // No explicit teardown needed: smart-pointer members release their resources.
  ~Booster() {
  }
206

207
  void CreateObjectiveAndMetrics() {
Guolin Ke's avatar
Guolin Ke committed
208
    // create objective function
Guolin Ke's avatar
Guolin Ke committed
209
210
    objective_fun_.reset(ObjectiveFunction::CreateObjectiveFunction(config_.objective,
                                                                    config_));
Guolin Ke's avatar
Guolin Ke committed
211
    if (objective_fun_ == nullptr) {
212
      Log::Info("Using self-defined objective function");
Guolin Ke's avatar
Guolin Ke committed
213
214
215
216
217
218
219
220
    }
    // initialize the objective function
    if (objective_fun_ != nullptr) {
      objective_fun_->Init(train_data_->metadata(), train_data_->num_data());
    }

    // create training metric
    train_metric_.clear();
Guolin Ke's avatar
Guolin Ke committed
221
    for (auto metric_type : config_.metric) {
Guolin Ke's avatar
Guolin Ke committed
222
      auto metric = std::unique_ptr<Metric>(
Guolin Ke's avatar
Guolin Ke committed
223
        Metric::CreateMetric(metric_type, config_));
224
225
226
      if (metric == nullptr) {
        continue;
      }
Guolin Ke's avatar
Guolin Ke committed
227
228
229
230
      metric->Init(train_data_->metadata(), train_data_->num_data());
      train_metric_.push_back(std::move(metric));
    }
    train_metric_.shrink_to_fit();
231
232
233
234
  }

  void ResetTrainingData(const Dataset* train_data) {
    if (train_data != train_data_) {
235
      UNIQUE_LOCK(mutex_)
236
237
238
239
240
241
      train_data_ = train_data;
      CreateObjectiveAndMetrics();
      // reset the boosting
      boosting_->ResetTrainingData(train_data_,
                                   objective_fun_.get(), Common::ConstPtrInVectorWrapper<Metric>(train_metric_));
    }
wxchan's avatar
wxchan committed
242
243
  }

244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
  static void CheckDatasetResetConfig(
      const Config& old_config,
      const std::unordered_map<std::string, std::string>& new_param) {
    Config new_config;
    new_config.Set(new_param);
    if (new_param.count("data_random_seed") &&
        new_config.data_random_seed != old_config.data_random_seed) {
      Log::Fatal("Cannot change data_random_seed after constructed Dataset handle.");
    }
    if (new_param.count("max_bin") &&
        new_config.max_bin != old_config.max_bin) {
      Log::Fatal("Cannot change max_bin after constructed Dataset handle.");
    }
    if (new_param.count("max_bin_by_feature") &&
        new_config.max_bin_by_feature != old_config.max_bin_by_feature) {
      Log::Fatal(
          "Cannot change max_bin_by_feature after constructed Dataset handle.");
    }
    if (new_param.count("bin_construct_sample_cnt") &&
        new_config.bin_construct_sample_cnt !=
            old_config.bin_construct_sample_cnt) {
      Log::Fatal(
          "Cannot change bin_construct_sample_cnt after constructed Dataset "
          "handle.");
    }
    if (new_param.count("min_data_in_bin") &&
        new_config.min_data_in_bin != old_config.min_data_in_bin) {
      Log::Fatal(
          "Cannot change min_data_in_bin after constructed Dataset handle.");
    }
    if (new_param.count("use_missing") &&
        new_config.use_missing != old_config.use_missing) {
      Log::Fatal("Cannot change use_missing after constructed Dataset handle.");
    }
    if (new_param.count("zero_as_missing") &&
        new_config.zero_as_missing != old_config.zero_as_missing) {
      Log::Fatal(
          "Cannot change zero_as_missing after constructed Dataset handle.");
    }
    if (new_param.count("categorical_feature") &&
        new_config.categorical_feature != old_config.categorical_feature) {
      Log::Fatal(
          "Cannot change categorical_feature after constructed Dataset "
          "handle.");
    }
    if (new_param.count("feature_pre_filter") &&
        new_config.feature_pre_filter != old_config.feature_pre_filter) {
      Log::Fatal(
          "Cannot change feature_pre_filter after constructed Dataset handle.");
    }
    if (new_param.count("is_enable_sparse") &&
        new_config.is_enable_sparse != old_config.is_enable_sparse) {
      Log::Fatal(
          "Cannot change is_enable_sparse after constructed Dataset handle.");
    }
    if (new_param.count("pre_partition") &&
        new_config.pre_partition != old_config.pre_partition) {
      Log::Fatal(
          "Cannot change pre_partition after constructed Dataset handle.");
    }
    if (new_param.count("enable_bundle") &&
        new_config.enable_bundle != old_config.enable_bundle) {
      Log::Fatal(
          "Cannot change enable_bundle after constructed Dataset handle.");
    }
    if (new_param.count("header") && new_config.header != old_config.header) {
      Log::Fatal("Cannot change header after constructed Dataset handle.");
    }
    if (new_param.count("two_round") &&
        new_config.two_round != old_config.two_round) {
      Log::Fatal("Cannot change two_round after constructed Dataset handle.");
    }
    if (new_param.count("label_column") &&
        new_config.label_column != old_config.label_column) {
      Log::Fatal(
          "Cannot change label_column after constructed Dataset handle.");
    }
    if (new_param.count("weight_column") &&
        new_config.weight_column != old_config.weight_column) {
      Log::Fatal(
          "Cannot change weight_column after constructed Dataset handle.");
    }
    if (new_param.count("group_column") &&
        new_config.group_column != old_config.group_column) {
      Log::Fatal(
          "Cannot change group_column after constructed Dataset handle.");
    }
    if (new_param.count("ignore_column") &&
        new_config.ignore_column != old_config.ignore_column) {
      Log::Fatal(
          "Cannot change ignore_column after constructed Dataset handle.");
    }
    if (new_param.count("forcedbins_filename")) {
      Log::Fatal("Cannot change forced bins after constructed Dataset handle.");
    }
    if (new_param.count("min_data_in_leaf") &&
        new_config.min_data_in_leaf < old_config.min_data_in_leaf &&
        old_config.feature_pre_filter) {
      Log::Fatal(
          "Reducing `min_data_in_leaf` with `feature_pre_filter=true` may "
          "cause unexpected behaviour "
          "for features that were pre-filtered by the larger "
          "`min_data_in_leaf`.\n"
          "You need to set `feature_pre_filter=false` to dynamically change "
          "the `min_data_in_leaf`.");
    }
Nikita Titov's avatar
Nikita Titov committed
350
    if (new_param.count("linear_tree") && new_config.linear_tree != old_config.linear_tree) {
351
      Log::Fatal("Cannot change linear_tree after constructed Dataset handle.");
352
    }
Nikita Titov's avatar
Nikita Titov committed
353
354
355
356
    if (new_param.count("precise_float_parser") &&
        new_config.precise_float_parser != old_config.precise_float_parser) {
      Log::Fatal("Cannot change precise_float_parser after constructed Dataset handle.");
    }
357
358
  }

wxchan's avatar
wxchan committed
359
  void ResetConfig(const char* parameters) {
360
    UNIQUE_LOCK(mutex_)
Guolin Ke's avatar
Guolin Ke committed
361
    auto param = Config::Str2Map(parameters);
362
363
364
    Config new_config;
    new_config.Set(param);
    if (param.count("num_class") && new_config.num_class != config_.num_class) {
365
      Log::Fatal("Cannot change num_class during training");
wxchan's avatar
wxchan committed
366
    }
367
    if (param.count("boosting") && new_config.boosting != config_.boosting) {
Guolin Ke's avatar
Guolin Ke committed
368
      Log::Fatal("Cannot change boosting during training");
wxchan's avatar
wxchan committed
369
    }
370
    if (param.count("metric") && new_config.metric != config_.metric) {
371
      Log::Fatal("Cannot change metric during training");
Guolin Ke's avatar
Guolin Ke committed
372
    }
373
374
    CheckDatasetResetConfig(config_, param);

Guolin Ke's avatar
Guolin Ke committed
375
    config_.Set(param);
376

377
    OMP_SET_NUM_THREADS(config_.num_threads);
Guolin Ke's avatar
Guolin Ke committed
378
379
380

    if (param.count("objective")) {
      // create objective function
Guolin Ke's avatar
Guolin Ke committed
381
382
      objective_fun_.reset(ObjectiveFunction::CreateObjectiveFunction(config_.objective,
                                                                      config_));
Guolin Ke's avatar
Guolin Ke committed
383
      if (objective_fun_ == nullptr) {
384
        Log::Info("Using self-defined objective function");
Guolin Ke's avatar
Guolin Ke committed
385
386
387
388
389
      }
      // initialize the objective function
      if (objective_fun_ != nullptr) {
        objective_fun_->Init(train_data_->metadata(), train_data_->num_data());
      }
390
391
      boosting_->ResetTrainingData(train_data_,
                                   objective_fun_.get(), Common::ConstPtrInVectorWrapper<Metric>(train_metric_));
wxchan's avatar
wxchan committed
392
    }
Guolin Ke's avatar
Guolin Ke committed
393

Guolin Ke's avatar
Guolin Ke committed
394
    boosting_->ResetConfig(&config_);
wxchan's avatar
wxchan committed
395
396
397
  }

  void AddValidData(const Dataset* valid_data) {
398
    UNIQUE_LOCK(mutex_)
wxchan's avatar
wxchan committed
399
    valid_metrics_.emplace_back();
Guolin Ke's avatar
Guolin Ke committed
400
401
    for (auto metric_type : config_.metric) {
      auto metric = std::unique_ptr<Metric>(Metric::CreateMetric(metric_type, config_));
402
403
404
      if (metric == nullptr) {
        continue;
      }
wxchan's avatar
wxchan committed
405
406
407
408
409
      metric->Init(valid_data->metadata(), valid_data->num_data());
      valid_metrics_.back().push_back(std::move(metric));
    }
    valid_metrics_.back().shrink_to_fit();
    boosting_->AddValidDataset(valid_data,
410
                               Common::ConstPtrInVectorWrapper<Metric>(valid_metrics_.back()));
wxchan's avatar
wxchan committed
411
  }
Guolin Ke's avatar
Guolin Ke committed
412

413
  // Run one boosting iteration using the configured objective's own
  // gradients/hessians (nullptr, nullptr). Returns the boosting object's
  // result flag (propagated from TrainOneIter).
  bool TrainOneIter() {
    UNIQUE_LOCK(mutex_)
    return boosting_->TrainOneIter(nullptr, nullptr);
  }

Guolin Ke's avatar
Guolin Ke committed
418
  // Refit the existing trees from a (nrow x ncol) matrix of leaf indices.
  void Refit(const int32_t* leaf_preds, int32_t nrow, int32_t ncol) {
    UNIQUE_LOCK(mutex_)
    boosting_->RefitTree(leaf_preds, nrow, ncol);
  }

423
  // Run one boosting iteration with caller-supplied gradients and hessians
  // (the custom-objective path of the C API).
  bool TrainOneIter(const score_t* gradients, const score_t* hessians) {
    UNIQUE_LOCK(mutex_)
    return boosting_->TrainOneIter(gradients, hessians);
  }

wxchan's avatar
wxchan committed
428
  // Undo the most recent boosting iteration.
  void RollbackOneIter() {
    UNIQUE_LOCK(mutex_)
    boosting_->RollbackOneIter();
  }

433
  void SetSingleRowPredictorInner(int start_iteration, int num_iteration, int predict_type, const Config& config) {
434
435
436
      UNIQUE_LOCK(mutex_)
      if (single_row_predictor_[predict_type].get() == nullptr ||
          !single_row_predictor_[predict_type]->IsPredictorEqual(config, num_iteration, boosting_.get())) {
437
        single_row_predictor_[predict_type].reset(new SingleRowPredictorInner(predict_type, boosting_.get(),
438
                                                                         config, start_iteration, num_iteration));
439
440
441
      }
  }

442
443
444
445
446
447
448
449
450
451
452
  // Create a caller-owned SingleRowPredictor (the public "FastConfig")
  // bound to this booster's mutex and model.
  std::unique_ptr<SingleRowPredictor> InitSingleRowPredictor(int predict_type, int start_iteration, int num_iteration, int data_type, int32_t num_cols, const char *parameters) {
    // Workaround https://github.com/microsoft/LightGBM/issues/6142 by locking here
    // This is only a workaround because if predictors are initialized differently it may still behave incorrectly,
    // and because multiple racing Predictor initializations through LGBM_BoosterPredictForMat suffers from that same issue of Predictor init writing things in the booster.
    // Once #6142 is fixed (predictor doesn't write in the Booster as should have been the case since 1c35c3b9ede9adab8ccc5fd7b4b2b6af188a79f0), this line can be removed.
    UNIQUE_LOCK(mutex_)

    return std::unique_ptr<SingleRowPredictor>(new SingleRowPredictor(
      &mutex_, parameters, data_type, num_cols, predict_type, boosting_.get(), start_iteration, num_iteration));
  }

453
  void PredictSingleRow(int predict_type, int ncol,
454
455
               std::function<std::vector<std::pair<int, double>>(int row_idx)> get_row_fun,
               const Config& config,
456
               double* out_result, int64_t* out_len) const {
457
458
459
    if (!config.predict_disable_shape_check && ncol != boosting_->MaxFeatureIdx() + 1) {
      Log::Fatal("The number of features in data (%d) is not the same as it was in training data (%d).\n"\
                 "You can set ``predict_disable_shape_check=true`` to discard this error, but please be aware what you are doing.", ncol, boosting_->MaxFeatureIdx() + 1);
460
    }
461
    UNIQUE_LOCK(mutex_)
462
    const auto& single_row_predictor = single_row_predictor_[predict_type];
463
464
    auto one_row = get_row_fun(0);
    auto pred_wrt_ptr = out_result;
465
    single_row_predictor->predict_function(one_row, pred_wrt_ptr);
466

467
    *out_len = single_row_predictor->num_pred_in_one_row;
468
469
  }

470
  std::shared_ptr<Predictor> CreatePredictor(int start_iteration, int num_iteration, int predict_type, int ncol, const Config& config) const {
471
472
473
    if (!config.predict_disable_shape_check && ncol != boosting_->MaxFeatureIdx() + 1) {
      Log::Fatal("The number of features in data (%d) is not the same as it was in training data (%d).\n" \
                 "You can set ``predict_disable_shape_check=true`` to discard this error, but please be aware what you are doing.", ncol, boosting_->MaxFeatureIdx() + 1);
474
    }
Guolin Ke's avatar
Guolin Ke committed
475
476
    bool is_predict_leaf = false;
    bool is_raw_score = false;
Guolin Ke's avatar
Guolin Ke committed
477
    bool predict_contrib = false;
Guolin Ke's avatar
Guolin Ke committed
478
    if (predict_type == C_API_PREDICT_LEAF_INDEX) {
Guolin Ke's avatar
Guolin Ke committed
479
      is_predict_leaf = true;
Guolin Ke's avatar
Guolin Ke committed
480
    } else if (predict_type == C_API_PREDICT_RAW_SCORE) {
Guolin Ke's avatar
Guolin Ke committed
481
      is_raw_score = true;
482
    } else if (predict_type == C_API_PREDICT_CONTRIB) {
Guolin Ke's avatar
Guolin Ke committed
483
      predict_contrib = true;
Guolin Ke's avatar
Guolin Ke committed
484
485
    } else {
      is_raw_score = false;
Guolin Ke's avatar
Guolin Ke committed
486
    }
Guolin Ke's avatar
Guolin Ke committed
487

488
    return std::make_shared<Predictor>(boosting_.get(), start_iteration, num_iteration, is_raw_score, is_predict_leaf, predict_contrib,
489
                        config.pred_early_stop, config.pred_early_stop_freq, config.pred_early_stop_margin);
490
491
  }

492
  void Predict(int start_iteration, int num_iteration, int predict_type, int nrow, int ncol,
493
494
               std::function<std::vector<std::pair<int, double>>(int row_idx)> get_row_fun,
               const Config& config,
495
496
               double* out_result, int64_t* out_len) const {
    SHARED_LOCK(mutex_);
497
    auto predictor = CreatePredictor(start_iteration, num_iteration, predict_type, ncol, config);
498
499
500
501
502
503
504
    bool is_predict_leaf = false;
    bool predict_contrib = false;
    if (predict_type == C_API_PREDICT_LEAF_INDEX) {
      is_predict_leaf = true;
    } else if (predict_type == C_API_PREDICT_CONTRIB) {
      predict_contrib = true;
    }
505
    int64_t num_pred_in_one_row = boosting_->NumPredictOneRow(start_iteration, num_iteration, is_predict_leaf, predict_contrib);
506
    auto pred_fun = predictor->GetPredictFunction();
507
    OMP_INIT_EX();
508
    #pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(static)
Guolin Ke's avatar
Guolin Ke committed
509
    for (int i = 0; i < nrow; ++i) {
510
      OMP_LOOP_EX_BEGIN();
Guolin Ke's avatar
Guolin Ke committed
511
      auto one_row = get_row_fun(i);
Tony-Y's avatar
Tony-Y committed
512
      auto pred_wrt_ptr = out_result + static_cast<size_t>(num_pred_in_one_row) * i;
Guolin Ke's avatar
Guolin Ke committed
513
      pred_fun(one_row, pred_wrt_ptr);
514
      OMP_LOOP_EX_END();
Guolin Ke's avatar
Guolin Ke committed
515
    }
516
    OMP_THROW_EX();
517
    *out_len = num_pred_in_one_row * nrow;
Guolin Ke's avatar
Guolin Ke committed
518
519
  }

520
  void PredictSparse(int start_iteration, int num_iteration, int predict_type, int64_t nrow, int ncol,
521
522
523
524
                     std::function<std::vector<std::pair<int, double>>(int64_t row_idx)> get_row_fun,
                     const Config& config, int64_t* out_elements_size,
                     std::vector<std::vector<std::unordered_map<int, double>>>* agg_ptr,
                     int32_t** out_indices, void** out_data, int data_type,
525
                     bool* is_data_float32_ptr, int num_matrices) const {
526
    auto predictor = CreatePredictor(start_iteration, num_iteration, predict_type, ncol, config);
527
    auto pred_sparse_fun = predictor->GetPredictSparseFunction();
528
529
    std::vector<std::vector<std::unordered_map<int, double>>>& agg = *agg_ptr;
    OMP_INIT_EX();
530
    #pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(static)
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
    for (int64_t i = 0; i < nrow; ++i) {
      OMP_LOOP_EX_BEGIN();
      auto one_row = get_row_fun(i);
      agg[i] = std::vector<std::unordered_map<int, double>>(num_matrices);
      pred_sparse_fun(one_row, &agg[i]);
      OMP_LOOP_EX_END();
    }
    OMP_THROW_EX();
    // calculate the nonzero data and indices size
    int64_t elements_size = 0;
    for (int64_t i = 0; i < static_cast<int64_t>(agg.size()); ++i) {
      auto row_vector = agg[i];
      for (int j = 0; j < static_cast<int>(row_vector.size()); ++j) {
        elements_size += static_cast<int64_t>(row_vector[j].size());
      }
    }
    *out_elements_size = elements_size;
    *is_data_float32_ptr = false;
    // allocate data and indices arrays
    if (data_type == C_API_DTYPE_FLOAT32) {
      *out_data = new float[elements_size];
      *is_data_float32_ptr = true;
    } else if (data_type == C_API_DTYPE_FLOAT64) {
      *out_data = new double[elements_size];
    } else {
      Log::Fatal("Unknown data type in PredictSparse");
      return;
    }
    *out_indices = new int32_t[elements_size];
  }

562
  // Sparse prediction with CSR-formatted output (one stacked CSR matrix per
  // model-per-iteration, e.g. one per class in multiclass).
  // Outputs (allocated here, owned by the caller):
  //   out_indptr  — (nrow + 1) * num_matrices entries, int32 or int64 per indptr_type
  //   out_indices — column index of each nonzero element
  //   out_data    — nonzero values, float or double per data_type
  //   out_len     — [0] = element count, [1] = indptr length
  void PredictSparseCSR(int start_iteration, int num_iteration, int predict_type, int64_t nrow, int ncol,
                        std::function<std::vector<std::pair<int, double>>(int64_t row_idx)> get_row_fun,
                        const Config& config,
                        int64_t* out_len, void** out_indptr, int indptr_type,
                        int32_t** out_indices, void** out_data, int data_type) const {
    SHARED_LOCK(mutex_);
    // Get the number of trees per iteration (for multiclass scenario we output multiple sparse matrices)
    int num_matrices = boosting_->NumModelPerIteration();
    bool is_indptr_int32 = false;
    bool is_data_float32 = false;
    int64_t indptr_size = (nrow + 1) * num_matrices;
    if (indptr_type == C_API_DTYPE_INT32) {
      *out_indptr = new int32_t[indptr_size];
      is_indptr_int32 = true;
    } else if (indptr_type == C_API_DTYPE_INT64) {
      *out_indptr = new int64_t[indptr_size];
    } else {
      Log::Fatal("Unknown indptr type in PredictSparseCSR");
      return;
    }
    // aggregated per row feature contribution results
    std::vector<std::vector<std::unordered_map<int, double>>> agg(nrow);
    int64_t elements_size = 0;
    // Run the predictions and allocate out_indices / out_data sized to the
    // total nonzero count.
    PredictSparse(start_iteration, num_iteration, predict_type, nrow, ncol, get_row_fun, config, &elements_size, &agg,
                  out_indices, out_data, data_type, &is_data_float32, num_matrices);
    std::vector<int> row_sizes(num_matrices * nrow);
    std::vector<int64_t> row_matrix_offsets(num_matrices * nrow);
    std::vector<int64_t> matrix_offsets(num_matrices);
    // First pass (serial): compute, for every (matrix, row), the element
    // offset of the row within its matrix, plus each matrix's base offset —
    // this is what allows the copy pass below to run in parallel per row.
    int64_t row_vector_cnt = 0;
    for (int m = 0; m < num_matrices; ++m) {
      for (int64_t i = 0; i < static_cast<int64_t>(agg.size()); ++i) {
        auto row_vector = agg[i];
        auto row_vector_size = row_vector[m].size();
        // keep track of the row_vector sizes for parallelization
        row_sizes[row_vector_cnt] = static_cast<int>(row_vector_size);
        if (i == 0) {
          row_matrix_offsets[row_vector_cnt] = 0;
        } else {
          // Running prefix sum of row sizes within matrix m.
          row_matrix_offsets[row_vector_cnt] = static_cast<int64_t>(row_sizes[row_vector_cnt - 1] + row_matrix_offsets[row_vector_cnt - 1]);
        }
        row_vector_cnt++;
      }
      if (m == 0) {
        matrix_offsets[m] = 0;
      }
      if (m + 1 < num_matrices) {
        // Next matrix starts right after the last row of this one.
        matrix_offsets[m + 1] = static_cast<int64_t>(matrix_offsets[m] + row_matrix_offsets[row_vector_cnt - 1] + row_sizes[row_vector_cnt - 1]);
      }
    }
    // copy vector results to output for each row
    int64_t indptr_index = 0;
    for (int m = 0; m < num_matrices; ++m) {
      // Each matrix's indptr segment starts with a leading 0.
      if (is_indptr_int32) {
        (reinterpret_cast<int32_t*>(*out_indptr))[indptr_index] = 0;
      } else {
        (reinterpret_cast<int64_t*>(*out_indptr))[indptr_index] = 0;
      }
      indptr_index++;
      int64_t matrix_start_index = m * static_cast<int64_t>(agg.size());
      OMP_INIT_EX();
      #pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(static)
      for (int64_t i = 0; i < static_cast<int64_t>(agg.size()); ++i) {
        OMP_LOOP_EX_BEGIN();
        auto row_vector = agg[i];
        int64_t row_start_index = matrix_start_index + i;
        // Absolute position of this row's first element in the output arrays.
        int64_t element_index = row_matrix_offsets[row_start_index] + matrix_offsets[m];
        int64_t indptr_loop_index = indptr_index + i;
        for (auto it = row_vector[m].begin(); it != row_vector[m].end(); ++it) {
          (*out_indices)[element_index] = it->first;
          if (is_data_float32) {
            (reinterpret_cast<float*>(*out_data))[element_index] = static_cast<float>(it->second);
          } else {
            (reinterpret_cast<double*>(*out_data))[element_index] = it->second;
          }
          element_index++;
        }
        // CSR end pointer for this row (relative to the matrix's segment).
        int64_t indptr_value = row_matrix_offsets[row_start_index] + row_sizes[row_start_index];
        if (is_indptr_int32) {
          (reinterpret_cast<int32_t*>(*out_indptr))[indptr_loop_index] = static_cast<int32_t>(indptr_value);
        } else {
          (reinterpret_cast<int64_t*>(*out_indptr))[indptr_loop_index] = indptr_value;
        }
        OMP_LOOP_EX_END();
      }
      OMP_THROW_EX();
      indptr_index += static_cast<int64_t>(agg.size());
    }
    out_len[0] = elements_size;
    out_len[1] = indptr_size;
  }

653
  // Run prediction over `nrow` rows and materialize the results as CSC
  // (column-major sparse) arrays. For multiclass models, one CSC matrix is
  // produced per model-per-iteration; all matrices share the single
  // out_col_ptr / out_indices / out_data buffers, laid out back to back.
  // Outputs (allocated here; caller releases through the C API):
  //   out_col_ptr - column pointer array (int32 or int64 per col_ptr_type)
  //   out_indices - row index of every non-zero element
  //   out_data    - element values (float or double per data_type)
  //   out_len     - [0] = total element count, [1] = length of out_col_ptr
  void PredictSparseCSC(int start_iteration, int num_iteration, int predict_type, int64_t nrow, int ncol,
                        std::function<std::vector<std::pair<int, double>>(int64_t row_idx)> get_row_fun,
                        const Config& config,
                        int64_t* out_len, void** out_col_ptr, int col_ptr_type,
                        int32_t** out_indices, void** out_data, int data_type) const {
    SHARED_LOCK(mutex_);
    // Get the number of trees per iteration (for multiclass scenario we output multiple sparse matrices)
    int num_matrices = boosting_->NumModelPerIteration();
    auto predictor = CreatePredictor(start_iteration, num_iteration, predict_type, ncol, config);
    // NOTE(review): pred_sparse_fun is not referenced below — PredictSparse()
    // performs the prediction; confirm whether this line is still needed.
    auto pred_sparse_fun = predictor->GetPredictSparseFunction();
    bool is_col_ptr_int32 = false;
    bool is_data_float32 = false;
    // +1 output column: per-row contributions include a bias/base-value slot
    // in addition to the ncol features (see CONTRIB prediction layout).
    int num_output_cols = ncol + 1;
    int col_ptr_size = (num_output_cols + 1) * num_matrices;
    if (col_ptr_type == C_API_DTYPE_INT32) {
      *out_col_ptr = new int32_t[col_ptr_size];
      is_col_ptr_int32 = true;
    } else if (col_ptr_type == C_API_DTYPE_INT64) {
      *out_col_ptr = new int64_t[col_ptr_size];
    } else {
      Log::Fatal("Unknown col_ptr type in PredictSparseCSC");
      return;
    }
    // aggregated per row feature contribution results
    std::vector<std::vector<std::unordered_map<int, double>>> agg(nrow);
    int64_t elements_size = 0;
    // Fills `agg` with (column -> value) maps and allocates out_indices/out_data.
    PredictSparse(start_iteration, num_iteration, predict_type, nrow, ncol, get_row_fun, config, &elements_size, &agg,
                  out_indices, out_data, data_type, &is_data_float32, num_matrices);
    // calculate number of elements per column to construct
    // the CSC matrix with random access
    std::vector<std::vector<int64_t>> column_sizes(num_matrices);
    for (int m = 0; m < num_matrices; ++m) {
      column_sizes[m] = std::vector<int64_t>(num_output_cols, 0);
      for (int64_t i = 0; i < static_cast<int64_t>(agg.size()); ++i) {
        auto row_vector = agg[i];
        for (auto it = row_vector[m].begin(); it != row_vector[m].end(); ++it) {
          column_sizes[m][it->first] += 1;
        }
      }
    }
    // keep track of column counts
    std::vector<std::vector<int64_t>> column_counts(num_matrices);
    // keep track of beginning index for each column
    std::vector<std::vector<int64_t>> column_start_indices(num_matrices);
    // keep track of beginning index for each matrix
    std::vector<int64_t> matrix_start_indices(num_matrices, 0);
    int col_ptr_index = 0;
    // Build the col_ptr array (prefix sums of column sizes) per matrix.
    for (int m = 0; m < num_matrices; ++m) {
      int64_t col_ptr_value = 0;
      column_start_indices[m] = std::vector<int64_t>(num_output_cols, 0);
      column_counts[m] = std::vector<int64_t>(num_output_cols, 0);
      if (is_col_ptr_int32) {
        (reinterpret_cast<int32_t*>(*out_col_ptr))[col_ptr_index] = static_cast<int32_t>(col_ptr_value);
      } else {
        (reinterpret_cast<int64_t*>(*out_col_ptr))[col_ptr_index] = col_ptr_value;
      }
      col_ptr_index++;
      for (int64_t i = 1; i < static_cast<int64_t>(column_sizes[m].size()); ++i) {
        column_start_indices[m][i] = column_sizes[m][i - 1] + column_start_indices[m][i - 1];
        if (is_col_ptr_int32) {
          (reinterpret_cast<int32_t*>(*out_col_ptr))[col_ptr_index] = static_cast<int32_t>(column_start_indices[m][i]);
        } else {
          (reinterpret_cast<int64_t*>(*out_col_ptr))[col_ptr_index] = column_start_indices[m][i];
        }
        col_ptr_index++;
      }
      // closing entry: total number of elements in this matrix
      int64_t last_elem_index = static_cast<int64_t>(column_sizes[m].size()) - 1;
      int64_t last_column_start_index = column_start_indices[m][last_elem_index];
      int64_t last_column_size = column_sizes[m][last_elem_index];
      if (is_col_ptr_int32) {
        (reinterpret_cast<int32_t*>(*out_col_ptr))[col_ptr_index] = static_cast<int32_t>(last_column_start_index + last_column_size);
      } else {
        (reinterpret_cast<int64_t*>(*out_col_ptr))[col_ptr_index] = last_column_start_index + last_column_size;
      }
      if (m + 1 < num_matrices) {
        matrix_start_indices[m + 1] = matrix_start_indices[m] + last_column_start_index + last_column_size;
      }
      col_ptr_index++;
    }
    // Note: we parallelize across matrices instead of rows because of the column_counts[m][col_idx] increment inside the loop
    OMP_INIT_EX();
    #pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(static)
    for (int m = 0; m < num_matrices; ++m) {
      OMP_LOOP_EX_BEGIN();
      for (int64_t i = 0; i < static_cast<int64_t>(agg.size()); ++i) {
        auto row_vector = agg[i];
        for (auto it = row_vector[m].begin(); it != row_vector[m].end(); ++it) {
          int64_t col_idx = it->first;
          // scatter into the element's final CSC slot
          int64_t element_index = column_start_indices[m][col_idx] +
            matrix_start_indices[m] +
            column_counts[m][col_idx];
          // store the row index
          (*out_indices)[element_index] = static_cast<int32_t>(i);
          // update column count
          column_counts[m][col_idx]++;
          if (is_data_float32) {
            (reinterpret_cast<float*>(*out_data))[element_index] = static_cast<float>(it->second);
          } else {
            (reinterpret_cast<double*>(*out_data))[element_index] = it->second;
          }
        }
      }
      OMP_LOOP_EX_END();
    }
    OMP_THROW_EX();
    out_len[0] = elements_size;
    out_len[1] = col_ptr_size;
  }

762
  // Predict directly from a data file and write results to a result file.
  // `predict_type` selects the output flavor (normal score, raw score,
  // leaf index, or feature contributions); unknown types fall through to
  // normal (transformed) score prediction.
  void Predict(int start_iteration, int num_iteration, int predict_type, const char* data_filename,
               int data_has_header, const Config& config,
               const char* result_filename) const {
    SHARED_LOCK(mutex_)
    bool is_predict_leaf = false;
    bool is_raw_score = false;
    bool predict_contrib = false;
    if (predict_type == C_API_PREDICT_LEAF_INDEX) {
      is_predict_leaf = true;
    } else if (predict_type == C_API_PREDICT_RAW_SCORE) {
      is_raw_score = true;
    } else if (predict_type == C_API_PREDICT_CONTRIB) {
      predict_contrib = true;
    } else {
      // default: normal prediction (is_raw_score already false; kept for clarity)
      is_raw_score = false;
    }
    Predictor predictor(boosting_.get(), start_iteration, num_iteration, is_raw_score, is_predict_leaf, predict_contrib,
                        config.pred_early_stop, config.pred_early_stop_freq, config.pred_early_stop_margin);
    bool bool_data_has_header = data_has_header > 0 ? true : false;
    predictor.Predict(data_filename, result_filename, bool_data_has_header, config.predict_disable_shape_check,
                      config.precise_float_parser);
  }

785
  // Copy the cached predictions for the dataset at `data_idx` into
  // `out_result`; `out_len` receives the number of values written.
  // NOTE(review): unlike the sibling const accessors, this takes no
  // SHARED_LOCK — confirm callers serialize access externally.
  void GetPredictAt(int data_idx, double* out_result, int64_t* out_len) const {
    boosting_->GetPredictAt(data_idx, out_result, out_len);
  }

789
  // Serialize the model (iteration range and importance type forwarded
  // unchanged) to `filename` via the boosting object.
  void SaveModelToFile(int start_iteration, int num_iteration, int feature_importance_type, const char* filename) const {
    boosting_->SaveModelToFile(start_iteration, num_iteration, feature_importance_type, filename);
  }
792

793
  // Replace the current model with one parsed from a NUL-terminated
  // model string.
  void LoadModelFromString(const char* model_str) {
    size_t len = std::strlen(model_str);
    boosting_->LoadModelFromString(model_str, len);
  }

798
  // Serialize the model to an in-memory string (same parameters as
  // SaveModelToFile).
  std::string SaveModelToString(int start_iteration, int num_iteration,
                                int feature_importance_type) const {
    return boosting_->SaveModelToString(start_iteration,
                                        num_iteration, feature_importance_type);
  }

804
  // Dump the model structure as a string (JSON per the C API contract —
  // format is produced by Boosting::DumpModel).
  std::string DumpModel(int start_iteration, int num_iteration,
                        int feature_importance_type) const {
    return boosting_->DumpModel(start_iteration, num_iteration,
                                feature_importance_type);
  }
809

810
  // Per-feature importance values computed by the boosting object;
  // `importance_type` is forwarded unchanged.
  std::vector<double> FeatureImportance(int num_iteration, int importance_type) const {
    return boosting_->FeatureImportance(num_iteration, importance_type);
  }

814
  // Upper bound of the model's output value (thread-safe read).
  double UpperBoundValue() const {
    SHARED_LOCK(mutex_)
    return boosting_->GetUpperBoundValue();
  }

  // Lower bound of the model's output value (thread-safe read).
  double LowerBoundValue() const {
    SHARED_LOCK(mutex_)
    return boosting_->GetLowerBoundValue();
  }

Guolin Ke's avatar
Guolin Ke committed
824
  // Read a single leaf's output value. The GBDTBase cast assumes the
  // boosting implementation derives from GBDTBase; dynamic_cast would
  // return nullptr otherwise — presumably guaranteed by construction.
  double GetLeafValue(int tree_idx, int leaf_idx) const {
    SHARED_LOCK(mutex_)
    return dynamic_cast<GBDTBase*>(boosting_.get())->GetLeafValue(tree_idx, leaf_idx);
  }

  // Overwrite a single leaf's output value (exclusive lock: mutates the model).
  void SetLeafValue(int tree_idx, int leaf_idx, double val) {
    UNIQUE_LOCK(mutex_)
    dynamic_cast<GBDTBase*>(boosting_.get())->SetLeafValue(tree_idx, leaf_idx, val);
  }

834
  // Shuffle tree models in the iteration range [start_iter, end_iter)
  // (exclusive lock: mutates the model ordering).
  void ShuffleModels(int start_iter, int end_iter) {
    UNIQUE_LOCK(mutex_)
    boosting_->ShuffleModels(start_iter, end_iter);
  }

wxchan's avatar
wxchan committed
839
  int GetEvalCounts() const {
840
    SHARED_LOCK(mutex_)
wxchan's avatar
wxchan committed
841
842
843
844
845
846
    int ret = 0;
    for (const auto& metric : train_metric_) {
      ret += static_cast<int>(metric->GetName().size());
    }
    return ret;
  }
847

848
  // Copy each metric name into the caller-provided string buffers.
  // At most `len` names are written; each is truncated to `buffer_len`
  // bytes and always NUL-terminated. *out_buffer_len reports the buffer
  // size needed to hold the longest name untruncated. Returns the total
  // number of metric names (which may exceed `len`).
  int GetEvalNames(char** out_strs, const int len, const size_t buffer_len, size_t *out_buffer_len) const {
    SHARED_LOCK(mutex_)
    *out_buffer_len = 0;
    int idx = 0;
    for (const auto& metric : train_metric_) {
      for (const auto& name : metric->GetName()) {
        if (idx < len) {
          // copy at most buffer_len bytes, then force termination
          std::memcpy(out_strs[idx], name.c_str(), std::min(name.size() + 1, buffer_len));
          out_strs[idx][buffer_len - 1] = '\0';
        }
        *out_buffer_len = std::max(name.size() + 1, *out_buffer_len);
        ++idx;
      }
    }
    return idx;
  }

865
  int GetFeatureNames(char** out_strs, const int len, const size_t buffer_len, size_t *out_buffer_len) const {
866
    SHARED_LOCK(mutex_)
867
    *out_buffer_len = 0;
wxchan's avatar
wxchan committed
868
869
    int idx = 0;
    for (const auto& name : boosting_->FeatureNames()) {
870
871
872
873
874
      if (idx < len) {
        std::memcpy(out_strs[idx], name.c_str(), std::min(name.size() + 1, buffer_len));
        out_strs[idx][buffer_len - 1] = '\0';
      }
      *out_buffer_len = std::max(name.size() + 1, *out_buffer_len);
wxchan's avatar
wxchan committed
875
876
877
878
879
      ++idx;
    }
    return idx;
  }

wxchan's avatar
wxchan committed
880
  // Read-only access to the underlying boosting object (ownership retained).
  const Boosting* GetBoosting() const { return boosting_.get(); }

 private:
  /*! \brief Training dataset (raw pointer; presumably not owned — confirm owner) */
  const Dataset* train_data_;
  /*! \brief Boosting model backing this booster */
  std::unique_ptr<Boosting> boosting_;
  /*! \brief Cached single-row predictors, one slot per predictor type */
  std::unique_ptr<SingleRowPredictorInner> single_row_predictor_[PREDICTOR_TYPES];
  /*! \brief All configs */
  Config config_;
  /*! \brief Metric for training data */
  std::vector<std::unique_ptr<Metric>> train_metric_;
  /*! \brief Metrics for validation data */
  std::vector<std::vector<std::unique_ptr<Metric>>> valid_metrics_;
  /*! \brief Training objective function */
  std::unique_ptr<ObjectiveFunction> objective_fun_;
  /*! \brief mutex for threading safe call */
  mutable yamc::alternate::shared_mutex mutex_;
};

}  // namespace LightGBM
Guolin Ke's avatar
Guolin Ke committed
900

901
902
// explicitly declare symbols from LightGBM namespace
using LightGBM::AllgatherFunction;
903
using LightGBM::ArrowChunkedArray;
904
using LightGBM::ArrowTable;
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
using LightGBM::Booster;
using LightGBM::Common::CheckElementsIntervalClosed;
using LightGBM::Common::RemoveQuotationSymbol;
using LightGBM::Common::Vector2Ptr;
using LightGBM::Common::VectorSize;
using LightGBM::Config;
using LightGBM::data_size_t;
using LightGBM::Dataset;
using LightGBM::DatasetLoader;
using LightGBM::kZeroThreshold;
using LightGBM::LGBM_APIHandleException;
using LightGBM::Log;
using LightGBM::Network;
using LightGBM::Random;
using LightGBM::ReduceScatterFunction;
920
using LightGBM::SingleRowPredictor;
Guolin Ke's avatar
Guolin Ke committed
921

Guolin Ke's avatar
Guolin Ke committed
922
923
924
925
926
927
928
929
// some help functions used to convert data

// Returns a functor mapping a row index to that row's dense feature values.
std::function<std::vector<double>(int row_idx)>
RowFunctionFromDenseMatric(const void* data, int num_row, int num_col, int data_type, int is_row_major);

// Returns a functor mapping a row index to (column, value) pairs of a dense row.
std::function<std::vector<std::pair<int, double>>(int row_idx)>
RowPairFunctionFromDenseMatric(const void* data, int num_row, int num_col, int data_type, int is_row_major);

// As above, but each row lives behind its own pointer in `data`.
std::function<std::vector<std::pair<int, double>>(int row_idx)>
RowPairFunctionFromDenseRows(const void** data, int num_col, int data_type);

// Returns a functor mapping a row index to the (column, value) pairs of a CSR row.
template<typename T>
std::function<std::vector<std::pair<int, double>>(T idx)>
RowFunctionFromCSR(const void* indptr, int indptr_type, const int32_t* indices,
                   const void* data, int data_type, int64_t nindptr, int64_t nelem);

// Row iterator of one column for CSC matrix
class CSC_RowIterator {
 public:
  CSC_RowIterator(const void* col_ptr, int col_ptr_type, const int32_t* indices,
                  const void* data, int data_type, int64_t ncol_ptr, int64_t nelem, int col_idx);
  ~CSC_RowIterator() {}
  // return value at idx, only can access by ascent order
  double Get(int idx);
  // return next non-zero pair, if index < 0, means no more data
  std::pair<int, double> NextNonZero();

 private:
  // position within the column's non-zero entries
  int nonzero_idx_ = 0;
  // row index of the most recently fetched non-zero (-1 before first fetch)
  int cur_idx_ = -1;
  // value of the most recently fetched non-zero
  double cur_val_ = 0.0f;
  // set once the column's non-zeros are exhausted
  bool is_end_ = false;
  // underlying accessor: non-zero (row, value) pair at a given position
  std::function<std::pair<int, double>(int idx)> iter_fun_;
};

// start of c_api functions

Guolin Ke's avatar
Guolin Ke committed
959
// Return the most recent error message recorded by LGBM_SetLastError
// (see LGBM_APIHandleException above).
const char* LGBM_GetLastError() {
  return LastErrorMsg();
}

963
964
965
966
967
968
969
970
971
972
973
974
// Dump all parameter aliases into `out_str`. *out_len always receives the
// required size (string length + NUL); the copy happens only when the
// caller-provided buffer is large enough.
int LGBM_DumpParamAliases(int64_t buffer_len,
                          int64_t* out_len,
                          char* out_str) {
  API_BEGIN();
  const std::string aliases = Config::DumpAliases();
  const int64_t required = static_cast<int64_t>(aliases.size()) + 1;
  *out_len = required;
  if (required <= buffer_len) {
    std::memcpy(out_str, aliases.c_str(), required);
  }
  API_END();
}

975
976
977
978
979
980
// Redirect LightGBM's log output to a user-supplied callback.
int LGBM_RegisterLogCallback(void (*callback)(const char*)) {
  API_BEGIN();
  Log::ResetCallBack(callback);
  API_END();
}

981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
// Number of rows to sample: the full row count, capped at
// bin_construct_sample_cnt.
static inline int SampleCount(int32_t total_nrow, const Config& config) {
  return static_cast<int>(std::min<int32_t>(total_nrow, config.bin_construct_sample_cnt));
}

// Draw a deterministic (seeded by data_random_seed) random sample of row
// indices from [0, total_nrow).
static inline std::vector<int32_t> CreateSampleIndices(int32_t total_nrow, const Config& config) {
  Random rand(config.data_random_seed);
  int sample_cnt = SampleCount(total_nrow, config);
  return rand.Sample(total_nrow, sample_cnt);
}

// C API: report how many rows would be sampled for bin construction,
// given the row count and a parameter string.
int LGBM_GetSampleCount(int32_t num_total_row,
                        const char* parameters,
                        int* out) {
  API_BEGIN();
  if (out == nullptr) {
    Log::Fatal("LGBM_GetSampleCount output is nullptr");
  }
  auto param = Config::Str2Map(parameters);
  Config config;
  config.Set(param);

  *out = SampleCount(num_total_row, config);
  API_END();
}

// C API: fill `out` with sampled row indices (int32) and report the count
// in *out_len. `out` must be large enough for LGBM_GetSampleCount entries.
int LGBM_SampleIndices(int32_t num_total_row,
                       const char* parameters,
                       void* out,
                       int32_t* out_len) {
  // This API is to keep python binding's behavior the same with C++ implementation.
  // Sample count, random seed etc. should be provided in parameters.
  API_BEGIN();
  if (out == nullptr) {
    Log::Fatal("LGBM_SampleIndices output is nullptr");
  }
  // guard out_len too: it is dereferenced below (previously unchecked)
  if (out_len == nullptr) {
    Log::Fatal("LGBM_SampleIndices output length is nullptr");
  }
  auto param = Config::Str2Map(parameters);
  Config config;
  config.Set(param);

  auto sample_indices = CreateSampleIndices(num_total_row, config);
  // std:: qualification for consistency with the rest of this file
  std::memcpy(out, sample_indices.data(), sizeof(int32_t) * sample_indices.size());
  *out_len = static_cast<int32_t>(sample_indices.size());
  API_END();
}

1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
// C API: read the byte at `index` from a serialized-dataset byte buffer.
int LGBM_ByteBufferGetAt(ByteBufferHandle handle, int32_t index, uint8_t* out_val) {
  API_BEGIN();
  LightGBM::ByteBuffer* byteBuffer = reinterpret_cast<LightGBM::ByteBuffer*>(handle);
  *out_val = byteBuffer->GetAt(index);
  API_END();
}

// C API: release a byte buffer previously handed out by this library.
int LGBM_ByteBufferFree(ByteBufferHandle handle) {
  API_BEGIN();
  delete reinterpret_cast<LightGBM::ByteBuffer*>(handle);
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
1039
// C API: load a dataset from `filename`. With a `reference` dataset the new
// data is binned with the reference's feature mappers (validation-set case);
// otherwise bin mappers are constructed from the file itself, partitioned by
// machine rank in the distributed case.
int LGBM_DatasetCreateFromFile(const char* filename,
                               const char* parameters,
                               const DatasetHandle reference,
                               DatasetHandle* out) {
  API_BEGIN();
  auto param = Config::Str2Map(parameters);
  Config config;
  config.Set(param);
  OMP_SET_NUM_THREADS(config.num_threads);
  DatasetLoader loader(config, nullptr, 1, filename);
  if (reference == nullptr) {
    if (Network::num_machines() == 1) {
      *out = loader.LoadFromFile(filename);
    } else {
      // distributed: each machine loads its rank's partition
      *out = loader.LoadFromFile(filename, Network::rank(), Network::num_machines());
    }
  } else {
    *out = loader.LoadFromFileAlignWithOtherDataset(filename,
                                                    reinterpret_cast<const Dataset*>(reference));
  }
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
1062
// C API: construct an (empty) dataset whose bin mappers are built from
// column-wise sampled values; rows are pushed later via the PushRows APIs.
// `num_local_row` sizes this machine's partition, `num_dist_row` the
// distributed total.
int LGBM_DatasetCreateFromSampledColumn(double** sample_data,
                                        int** sample_indices,
                                        int32_t ncol,
                                        const int* num_per_col,
                                        int32_t num_sample_row,
                                        int32_t num_local_row,
                                        int64_t num_dist_row,
                                        const char* parameters,
                                        DatasetHandle* out) {
  API_BEGIN();
  auto param = Config::Str2Map(parameters);
  Config config;
  config.Set(param);
  OMP_SET_NUM_THREADS(config.num_threads);
  DatasetLoader loader(config, nullptr, 1, nullptr);
  *out = loader.ConstructFromSampleData(sample_data,
                                        sample_indices,
                                        ncol,
                                        num_per_col,
                                        num_sample_row,
                                        static_cast<data_size_t>(num_local_row),
                                        num_dist_row);
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
1087
// C API: create an empty dataset of `num_total_row` rows that shares the
// reference dataset's bin mappers (typical validation-set construction).
int LGBM_DatasetCreateByReference(const DatasetHandle reference,
                                  int64_t num_total_row,
                                  DatasetHandle* out) {
  API_BEGIN();
  std::unique_ptr<Dataset> ret;
  data_size_t nrows = static_cast<data_size_t>(num_total_row);
  ret.reset(new Dataset(nrows));
  const Dataset* reference_dataset = reinterpret_cast<const Dataset*>(reference);
  ret->CreateValid(reference_dataset);
  ret->InitByReference(nrows, reference_dataset);
  // ownership passes to the caller through the opaque handle
  *out = ret.release();
  API_END();
}

1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
// C API: reconstruct a dataset definition from a serialized reference
// buffer (produced elsewhere via dataset serialization), sized for
// `num_row` rows and `num_classes` classes.
int LGBM_DatasetCreateFromSerializedReference(const void* ref_buffer,
                                              int32_t ref_buffer_size,
                                              int64_t num_row,
                                              int32_t num_classes,
                                              const char* parameters,
                                              DatasetHandle* out) {
  API_BEGIN();
  auto param = Config::Str2Map(parameters);
  Config config;
  config.Set(param);
  OMP_SET_NUM_THREADS(config.num_threads);
  DatasetLoader loader(config, nullptr, 1, nullptr);
  *out = loader.LoadFromSerializedReference(static_cast<const char*>(ref_buffer),
    static_cast<size_t>(ref_buffer_size),
    static_cast<data_size_t>(num_row),
    num_classes);
  API_END();
}

1120
1121
1122
1123
1124
// C API: prepare a dataset for streaming row insertion (metadata buffers,
// thread bookkeeping) and switch it to manual-finish mode so that loading
// is only completed by an explicit LGBM_DatasetMarkFinished call.
int LGBM_DatasetInitStreaming(DatasetHandle dataset,
                              int32_t has_weights,
                              int32_t has_init_scores,
                              int32_t has_queries,
                              int32_t nclasses,
                              int32_t nthreads,
                              int32_t omp_max_threads) {
  API_BEGIN();
  auto p_dataset = reinterpret_cast<Dataset*>(dataset);
  auto num_data = p_dataset->num_data();
  p_dataset->InitStreaming(num_data, has_weights, has_init_scores, has_queries, nclasses, nthreads, omp_max_threads);
  // streaming callers signal completion explicitly
  p_dataset->set_wait_for_manual_finish(true);
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
1135
// C API: push a dense block of `nrow` rows (row-major) into the dataset,
// starting at row `start_row`. When the final row lands and the dataset is
// not in manual-finish mode, loading is finalized automatically.
int LGBM_DatasetPushRows(DatasetHandle dataset,
                         const void* data,
                         int data_type,
                         int32_t nrow,
                         int32_t ncol,
                         int32_t start_row) {
  API_BEGIN();
  auto p_dataset = reinterpret_cast<Dataset*>(dataset);
  auto get_row_fun = RowFunctionFromDenseMatric(data, nrow, ncol, data_type, 1);
  if (p_dataset->has_raw()) {
    // NOTE(review): raw storage is grown by num_numeric_features() + nrow —
    // confirm this is the intended ResizeRaw sizing.
    p_dataset->ResizeRaw(p_dataset->num_numeric_features() + nrow);
  }
  OMP_INIT_EX();
  #pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(static)
  for (int i = 0; i < nrow; ++i) {
    OMP_LOOP_EX_BEGIN();
    const int tid = omp_get_thread_num();
    auto one_row = get_row_fun(i);
    p_dataset->PushOneRow(tid, start_row + i, one_row);
    OMP_LOOP_EX_END();
  }
  OMP_THROW_EX();
  if (!p_dataset->wait_for_manual_finish() && (start_row + nrow == p_dataset->num_data())) {
    p_dataset->FinishLoad();
  }
  API_END();
}

// C API: streaming variant of LGBM_DatasetPushRows that also inserts the
// rows' labels/weights/init scores/query ids. `tid` is the caller's external
// thread id, combined with the OpenMP thread id to keep per-thread push
// buffers distinct across concurrent external callers.
int LGBM_DatasetPushRowsWithMetadata(DatasetHandle dataset,
                                     const void* data,
                                     int data_type,
                                     int32_t nrow,
                                     int32_t ncol,
                                     int32_t start_row,
                                     const float* labels,
                                     const float* weights,
                                     const double* init_scores,
                                     const int32_t* queries,
                                     int32_t tid) {
  API_BEGIN();
#ifdef LABEL_T_USE_DOUBLE
  Log::Fatal("Don't support LABEL_T_USE_DOUBLE");
#endif
  if (!data) {
    Log::Fatal("data cannot be null.");
  }
  auto p_dataset = reinterpret_cast<Dataset*>(dataset);
  auto get_row_fun = RowFunctionFromDenseMatric(data, nrow, ncol, data_type, 1);
  if (p_dataset->has_raw()) {
    p_dataset->ResizeRaw(p_dataset->num_numeric_features() + nrow);
  }

  const int max_omp_threads = p_dataset->omp_max_threads() > 0 ? p_dataset->omp_max_threads() : OMP_NUM_THREADS();

  OMP_INIT_EX();
#pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(static)
  for (int i = 0; i < nrow; ++i) {
    OMP_LOOP_EX_BEGIN();
    // convert internal thread id to be unique based on external thread id
    const int internal_tid = omp_get_thread_num() + (max_omp_threads * tid);
    auto one_row = get_row_fun(i);
    p_dataset->PushOneRow(internal_tid, start_row + i, one_row);
    OMP_LOOP_EX_END();
  }
  OMP_THROW_EX();

  p_dataset->InsertMetadataAt(start_row, nrow, labels, weights, init_scores, queries);

  if (!p_dataset->wait_for_manual_finish() && (start_row + nrow == p_dataset->num_data())) {
    p_dataset->FinishLoad();
  }
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
1209
// C API: push rows encoded as a CSR block into the dataset, starting at
// `start_row`. The unnamed int64_t parameter (column count) is unused here.
// Loading finishes automatically when the last row arrives, unless the
// dataset is in manual-finish mode.
int LGBM_DatasetPushRowsByCSR(DatasetHandle dataset,
                              const void* indptr,
                              int indptr_type,
                              const int32_t* indices,
                              const void* data,
                              int data_type,
                              int64_t nindptr,
                              int64_t nelem,
                              int64_t,
                              int64_t start_row) {
  API_BEGIN();
  auto p_dataset = reinterpret_cast<Dataset*>(dataset);
  auto get_row_fun = RowFunctionFromCSR<int>(indptr, indptr_type, indices, data, data_type, nindptr, nelem);
  // CSR row count = number of index-pointer entries minus one
  int32_t nrow = static_cast<int32_t>(nindptr - 1);
  if (p_dataset->has_raw()) {
    p_dataset->ResizeRaw(p_dataset->num_numeric_features() + nrow);
  }
  OMP_INIT_EX();
  #pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(static)
  for (int i = 0; i < nrow; ++i) {
    OMP_LOOP_EX_BEGIN();
    const int tid = omp_get_thread_num();
    auto one_row = get_row_fun(i);
    p_dataset->PushOneRow(tid, static_cast<data_size_t>(start_row + i), one_row);
    OMP_LOOP_EX_END();
  }
  OMP_THROW_EX();
  if (!p_dataset->wait_for_manual_finish() && (start_row + nrow == static_cast<int64_t>(p_dataset->num_data()))) {
    p_dataset->FinishLoad();
  }
  API_END();
}

// C API: streaming CSR variant that also inserts the rows' labels/weights/
// init scores/query ids. `tid` is the caller's external thread id, combined
// with the OpenMP thread id so concurrent external callers use distinct
// per-thread push buffers.
int LGBM_DatasetPushRowsByCSRWithMetadata(DatasetHandle dataset,
                                          const void* indptr,
                                          int indptr_type,
                                          const int32_t* indices,
                                          const void* data,
                                          int data_type,
                                          int64_t nindptr,
                                          int64_t nelem,
                                          int64_t start_row,
                                          const float* labels,
                                          const float* weights,
                                          const double* init_scores,
                                          const int32_t* queries,
                                          int32_t tid) {
  API_BEGIN();
#ifdef LABEL_T_USE_DOUBLE
  Log::Fatal("Don't support LABEL_T_USE_DOUBLE");
#endif
  if (!data) {
    Log::Fatal("data cannot be null.");
  }
  auto p_dataset = reinterpret_cast<Dataset*>(dataset);
  auto get_row_fun = RowFunctionFromCSR<int>(indptr, indptr_type, indices, data, data_type, nindptr, nelem);
  // CSR row count = number of index-pointer entries minus one
  int32_t nrow = static_cast<int32_t>(nindptr - 1);
  if (p_dataset->has_raw()) {
    p_dataset->ResizeRaw(p_dataset->num_numeric_features() + nrow);
  }

  const int max_omp_threads = p_dataset->omp_max_threads() > 0 ? p_dataset->omp_max_threads() : OMP_NUM_THREADS();

  OMP_INIT_EX();
#pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(static)
  for (int i = 0; i < nrow; ++i) {
    OMP_LOOP_EX_BEGIN();
    // convert internal thread id to be unique based on external thread id
    const int internal_tid = omp_get_thread_num() + (max_omp_threads * tid);
    auto one_row = get_row_fun(i);
    p_dataset->PushOneRow(internal_tid, static_cast<data_size_t>(start_row + i), one_row);
    OMP_LOOP_EX_END();
  }
  OMP_THROW_EX();

  p_dataset->InsertMetadataAt(static_cast<int32_t>(start_row), nrow, labels, weights, init_scores, queries);

  if (!p_dataset->wait_for_manual_finish() && (start_row + nrow == static_cast<int64_t>(p_dataset->num_data()))) {
    p_dataset->FinishLoad();
  }
  API_END();
}

int LGBM_DatasetSetWaitForManualFinish(DatasetHandle dataset, int wait) {
  API_BEGIN();
  // Toggle whether the dataset defers FinishLoad() until an explicit
  // LGBM_DatasetMarkFinished() call (used by streaming/push-rows workflows).
  reinterpret_cast<Dataset*>(dataset)->set_wait_for_manual_finish(wait);
  API_END();
}

int LGBM_DatasetMarkFinished(DatasetHandle dataset) {
  API_BEGIN();
  // Finalize a dataset that was populated incrementally (see
  // LGBM_DatasetSetWaitForManualFinish).
  reinterpret_cast<Dataset*>(dataset)->FinishLoad();
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
1306
int LGBM_DatasetCreateFromMat(const void* data,
1307
1308
1309
1310
1311
1312
1313
                              int data_type,
                              int32_t nrow,
                              int32_t ncol,
                              int is_row_major,
                              const char* parameters,
                              const DatasetHandle reference,
                              DatasetHandle* out) {
1314
1315
1316
1317
1318
  return LGBM_DatasetCreateFromMats(1,
                                    &data,
                                    data_type,
                                    &nrow,
                                    ncol,
1319
                                    &is_row_major,
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
                                    parameters,
                                    reference,
                                    out);
}

int LGBM_DatasetCreateFromMats(int32_t nmat,
                               const void** data,
                               int data_type,
                               int32_t* nrow,
                               int32_t ncol,
1330
                               int* is_row_major,
1331
1332
1333
                               const char* parameters,
                               const DatasetHandle reference,
                               DatasetHandle* out) {
1334
  API_BEGIN();
Guolin Ke's avatar
Guolin Ke committed
1335
1336
  auto param = Config::Str2Map(parameters);
  Config config;
1337
  config.Set(param);
1338
  OMP_SET_NUM_THREADS(config.num_threads);
Guolin Ke's avatar
Guolin Ke committed
1339
  std::unique_ptr<Dataset> ret;
1340
1341
1342
1343
1344
1345
1346
  int32_t total_nrow = 0;
  for (int j = 0; j < nmat; ++j) {
    total_nrow += nrow[j];
  }

  std::vector<std::function<std::vector<double>(int row_idx)>> get_row_fun;
  for (int j = 0; j < nmat; ++j) {
1347
    get_row_fun.push_back(RowFunctionFromDenseMatric(data[j], nrow[j], ncol, data_type, is_row_major[j]));
1348
  }
1349

Guolin Ke's avatar
Guolin Ke committed
1350
1351
  if (reference == nullptr) {
    // sample data first
1352
1353
    auto sample_indices = CreateSampleIndices(total_nrow, config);
    int sample_cnt = static_cast<int>(sample_indices.size());
1354
    std::vector<std::vector<double>> sample_values(ncol);
Guolin Ke's avatar
Guolin Ke committed
1355
    std::vector<std::vector<int>> sample_idx(ncol);
1356
1357
1358

    int offset = 0;
    int j = 0;
Guolin Ke's avatar
Guolin Ke committed
1359
    for (size_t i = 0; i < sample_indices.size(); ++i) {
Guolin Ke's avatar
Guolin Ke committed
1360
      auto idx = sample_indices[i];
1361
1362
1363
1364
      while ((idx - offset) >= nrow[j]) {
        offset += nrow[j];
        ++j;
      }
1365

1366
1367
1368
1369
1370
      auto row = get_row_fun[j](static_cast<int>(idx - offset));
      for (size_t k = 0; k < row.size(); ++k) {
        if (std::fabs(row[k]) > kZeroThreshold || std::isnan(row[k])) {
          sample_values[k].emplace_back(row[k]);
          sample_idx[k].emplace_back(static_cast<int>(i));
Guolin Ke's avatar
Guolin Ke committed
1371
        }
Guolin Ke's avatar
Guolin Ke committed
1372
1373
      }
    }
Guolin Ke's avatar
Guolin Ke committed
1374
    DatasetLoader loader(config, nullptr, 1, nullptr);
1375
1376
1377
1378
    ret.reset(loader.ConstructFromSampleData(Vector2Ptr<double>(&sample_values).data(),
                                             Vector2Ptr<int>(&sample_idx).data(),
                                             ncol,
                                             VectorSize<double>(sample_values).data(),
1379
1380
1381
                                             sample_cnt,
                                             total_nrow,
                                             total_nrow));
Guolin Ke's avatar
Guolin Ke committed
1382
  } else {
1383
    ret.reset(new Dataset(total_nrow));
Guolin Ke's avatar
Guolin Ke committed
1384
    ret->CreateValid(
1385
      reinterpret_cast<const Dataset*>(reference));
1386
1387
1388
    if (ret->has_raw()) {
      ret->ResizeRaw(total_nrow);
    }
Guolin Ke's avatar
Guolin Ke committed
1389
  }
1390
1391
1392
  int32_t start_row = 0;
  for (int j = 0; j < nmat; ++j) {
    OMP_INIT_EX();
1393
    #pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(static)
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
    for (int i = 0; i < nrow[j]; ++i) {
      OMP_LOOP_EX_BEGIN();
      const int tid = omp_get_thread_num();
      auto one_row = get_row_fun[j](i);
      ret->PushOneRow(tid, start_row + i, one_row);
      OMP_LOOP_EX_END();
    }
    OMP_THROW_EX();

    start_row += nrow[j];
Guolin Ke's avatar
Guolin Ke committed
1404
1405
  }
  ret->FinishLoad();
Guolin Ke's avatar
Guolin Ke committed
1406
  *out = ret.release();
1407
  API_END();
1408
1409
}

Guolin Ke's avatar
Guolin Ke committed
1410
int LGBM_DatasetCreateFromCSR(const void* indptr,
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
                              int indptr_type,
                              const int32_t* indices,
                              const void* data,
                              int data_type,
                              int64_t nindptr,
                              int64_t nelem,
                              int64_t num_col,
                              const char* parameters,
                              const DatasetHandle reference,
                              DatasetHandle* out) {
1421
  API_BEGIN();
1422
1423
1424
1425
1426
  if (num_col <= 0) {
    Log::Fatal("The number of columns should be greater than zero.");
  } else if (num_col >= INT32_MAX) {
    Log::Fatal("The number of columns should be smaller than INT32_MAX.");
  }
Guolin Ke's avatar
Guolin Ke committed
1427
1428
  auto param = Config::Str2Map(parameters);
  Config config;
1429
  config.Set(param);
1430
  OMP_SET_NUM_THREADS(config.num_threads);
Guolin Ke's avatar
Guolin Ke committed
1431
  std::unique_ptr<Dataset> ret;
1432
  auto get_row_fun = RowFunctionFromCSR<int>(indptr, indptr_type, indices, data, data_type, nindptr, nelem);
1433
1434
1435
  int32_t nrow = static_cast<int32_t>(nindptr - 1);
  if (reference == nullptr) {
    // sample data first
1436
1437
    auto sample_indices = CreateSampleIndices(nrow, config);
    int sample_cnt = static_cast<int>(sample_indices.size());
Guolin Ke's avatar
Guolin Ke committed
1438
1439
    std::vector<std::vector<double>> sample_values(num_col);
    std::vector<std::vector<int>> sample_idx(num_col);
1440
1441
1442
1443
    for (size_t i = 0; i < sample_indices.size(); ++i) {
      auto idx = sample_indices[i];
      auto row = get_row_fun(static_cast<int>(idx));
      for (std::pair<int, double>& inner_data : row) {
Nikita Titov's avatar
Nikita Titov committed
1444
        CHECK_LT(inner_data.first, num_col);
Guolin Ke's avatar
Guolin Ke committed
1445
        if (std::fabs(inner_data.second) > kZeroThreshold || std::isnan(inner_data.second)) {
Guolin Ke's avatar
Guolin Ke committed
1446
1447
          sample_values[inner_data.first].emplace_back(inner_data.second);
          sample_idx[inner_data.first].emplace_back(static_cast<int>(i));
1448
1449
1450
        }
      }
    }
Guolin Ke's avatar
Guolin Ke committed
1451
    DatasetLoader loader(config, nullptr, 1, nullptr);
1452
1453
1454
1455
    ret.reset(loader.ConstructFromSampleData(Vector2Ptr<double>(&sample_values).data(),
                                             Vector2Ptr<int>(&sample_idx).data(),
                                             static_cast<int>(num_col),
                                             VectorSize<double>(sample_values).data(),
1456
1457
1458
                                             sample_cnt,
                                             nrow,
                                             nrow));
1459
  } else {
1460
    ret.reset(new Dataset(nrow));
Guolin Ke's avatar
Guolin Ke committed
1461
    ret->CreateValid(
1462
      reinterpret_cast<const Dataset*>(reference));
1463
1464
1465
    if (ret->has_raw()) {
      ret->ResizeRaw(nrow);
    }
1466
  }
1467
  OMP_INIT_EX();
1468
  #pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(static)
1469
  for (int i = 0; i < static_cast<int>(nindptr - 1); ++i) {
1470
    OMP_LOOP_EX_BEGIN();
1471
1472
1473
    const int tid = omp_get_thread_num();
    auto one_row = get_row_fun(i);
    ret->PushOneRow(tid, i, one_row);
1474
    OMP_LOOP_EX_END();
1475
  }
1476
  OMP_THROW_EX();
1477
  ret->FinishLoad();
Guolin Ke's avatar
Guolin Ke committed
1478
  *out = ret.release();
1479
  API_END();
1480
1481
}

1482
int LGBM_DatasetCreateFromCSRFunc(void* get_row_funptr,
1483
1484
1485
1486
1487
                                  int num_rows,
                                  int64_t num_col,
                                  const char* parameters,
                                  const DatasetHandle reference,
                                  DatasetHandle* out) {
1488
  API_BEGIN();
1489
1490
1491
1492
1493
  if (num_col <= 0) {
    Log::Fatal("The number of columns should be greater than zero.");
  } else if (num_col >= INT32_MAX) {
    Log::Fatal("The number of columns should be smaller than INT32_MAX.");
  }
1494
1495
1496
1497
  auto get_row_fun = *static_cast<std::function<void(int idx, std::vector<std::pair<int, double>>&)>*>(get_row_funptr);
  auto param = Config::Str2Map(parameters);
  Config config;
  config.Set(param);
1498
  OMP_SET_NUM_THREADS(config.num_threads);
1499
1500
1501
1502
  std::unique_ptr<Dataset> ret;
  int32_t nrow = num_rows;
  if (reference == nullptr) {
    // sample data first
1503
1504
    auto sample_indices = CreateSampleIndices(nrow, config);
    int sample_cnt = static_cast<int>(sample_indices.size());
1505
1506
1507
1508
1509
1510
1511
1512
    std::vector<std::vector<double>> sample_values(num_col);
    std::vector<std::vector<int>> sample_idx(num_col);
    // local buffer to re-use memory
    std::vector<std::pair<int, double>> buffer;
    for (size_t i = 0; i < sample_indices.size(); ++i) {
      auto idx = sample_indices[i];
      get_row_fun(static_cast<int>(idx), buffer);
      for (std::pair<int, double>& inner_data : buffer) {
Nikita Titov's avatar
Nikita Titov committed
1513
        CHECK_LT(inner_data.first, num_col);
1514
1515
1516
1517
1518
1519
1520
        if (std::fabs(inner_data.second) > kZeroThreshold || std::isnan(inner_data.second)) {
          sample_values[inner_data.first].emplace_back(inner_data.second);
          sample_idx[inner_data.first].emplace_back(static_cast<int>(i));
        }
      }
    }
    DatasetLoader loader(config, nullptr, 1, nullptr);
1521
1522
1523
1524
    ret.reset(loader.ConstructFromSampleData(Vector2Ptr<double>(&sample_values).data(),
                                             Vector2Ptr<int>(&sample_idx).data(),
                                             static_cast<int>(num_col),
                                             VectorSize<double>(sample_values).data(),
1525
1526
1527
                                             sample_cnt,
                                             nrow,
                                             nrow));
1528
1529
1530
1531
  } else {
    ret.reset(new Dataset(nrow));
    ret->CreateValid(
      reinterpret_cast<const Dataset*>(reference));
1532
1533
1534
    if (ret->has_raw()) {
      ret->ResizeRaw(nrow);
    }
1535
  }
1536

1537
  OMP_INIT_EX();
Guolin Ke's avatar
Guolin Ke committed
1538
  std::vector<std::pair<int, double>> thread_buffer;
1539
  #pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(static) private(thread_buffer)
1540
1541
1542
  for (int i = 0; i < num_rows; ++i) {
    OMP_LOOP_EX_BEGIN();
    {
1543
      const int tid = omp_get_thread_num();
Guolin Ke's avatar
Guolin Ke committed
1544
1545
      get_row_fun(i, thread_buffer);
      ret->PushOneRow(tid, i, thread_buffer);
1546
1547
1548
1549
1550
1551
1552
1553
1554
    }
    OMP_LOOP_EX_END();
  }
  OMP_THROW_EX();
  ret->FinishLoad();
  *out = ret.release();
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
1555
int LGBM_DatasetCreateFromCSC(const void* col_ptr,
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
                              int col_ptr_type,
                              const int32_t* indices,
                              const void* data,
                              int data_type,
                              int64_t ncol_ptr,
                              int64_t nelem,
                              int64_t num_row,
                              const char* parameters,
                              const DatasetHandle reference,
                              DatasetHandle* out) {
1566
  API_BEGIN();
Guolin Ke's avatar
Guolin Ke committed
1567
1568
  auto param = Config::Str2Map(parameters);
  Config config;
1569
  config.Set(param);
1570
  OMP_SET_NUM_THREADS(config.num_threads);
Guolin Ke's avatar
Guolin Ke committed
1571
  std::unique_ptr<Dataset> ret;
Guolin Ke's avatar
Guolin Ke committed
1572
1573
1574
  int32_t nrow = static_cast<int32_t>(num_row);
  if (reference == nullptr) {
    // sample data first
1575
1576
    auto sample_indices = CreateSampleIndices(nrow, config);
    int sample_cnt = static_cast<int>(sample_indices.size());
Guolin Ke's avatar
Guolin Ke committed
1577
    std::vector<std::vector<double>> sample_values(ncol_ptr - 1);
Guolin Ke's avatar
Guolin Ke committed
1578
    std::vector<std::vector<int>> sample_idx(ncol_ptr - 1);
1579
    OMP_INIT_EX();
1580
    #pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(static)
Guolin Ke's avatar
Guolin Ke committed
1581
    for (int i = 0; i < static_cast<int>(sample_values.size()); ++i) {
1582
      OMP_LOOP_EX_BEGIN();
Guolin Ke's avatar
Guolin Ke committed
1583
1584
1585
      CSC_RowIterator col_it(col_ptr, col_ptr_type, indices, data, data_type, ncol_ptr, nelem, i);
      for (int j = 0; j < sample_cnt; j++) {
        auto val = col_it.Get(sample_indices[j]);
Guolin Ke's avatar
Guolin Ke committed
1586
        if (std::fabs(val) > kZeroThreshold || std::isnan(val)) {
Guolin Ke's avatar
Guolin Ke committed
1587
1588
          sample_values[i].emplace_back(val);
          sample_idx[i].emplace_back(j);
Guolin Ke's avatar
Guolin Ke committed
1589
1590
        }
      }
1591
      OMP_LOOP_EX_END();
Guolin Ke's avatar
Guolin Ke committed
1592
    }
1593
    OMP_THROW_EX();
Guolin Ke's avatar
Guolin Ke committed
1594
    DatasetLoader loader(config, nullptr, 1, nullptr);
1595
1596
1597
1598
    ret.reset(loader.ConstructFromSampleData(Vector2Ptr<double>(&sample_values).data(),
                                             Vector2Ptr<int>(&sample_idx).data(),
                                             static_cast<int>(sample_values.size()),
                                             VectorSize<double>(sample_values).data(),
1599
1600
1601
                                             sample_cnt,
                                             nrow,
                                             nrow));
Guolin Ke's avatar
Guolin Ke committed
1602
  } else {
1603
    ret.reset(new Dataset(nrow));
Guolin Ke's avatar
Guolin Ke committed
1604
    ret->CreateValid(
1605
      reinterpret_cast<const Dataset*>(reference));
Guolin Ke's avatar
Guolin Ke committed
1606
  }
1607
  OMP_INIT_EX();
1608
  #pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(static)
1609
  for (int i = 0; i < static_cast<int>(ncol_ptr - 1); ++i) {
1610
    OMP_LOOP_EX_BEGIN();
Guolin Ke's avatar
Guolin Ke committed
1611
    const int tid = omp_get_thread_num();
Guolin Ke's avatar
Guolin Ke committed
1612
    int feature_idx = ret->InnerFeatureIndex(i);
1613
1614
1615
    if (feature_idx < 0) {
      continue;
    }
Guolin Ke's avatar
Guolin Ke committed
1616
1617
    int group = ret->Feature2Group(feature_idx);
    int sub_feature = ret->Feture2SubFeature(feature_idx);
Guolin Ke's avatar
Guolin Ke committed
1618
    CSC_RowIterator col_it(col_ptr, col_ptr_type, indices, data, data_type, ncol_ptr, nelem, i);
Guolin Ke's avatar
Guolin Ke committed
1619
1620
1621
1622
1623
1624
1625
    auto bin_mapper = ret->FeatureBinMapper(feature_idx);
    if (bin_mapper->GetDefaultBin() == bin_mapper->GetMostFreqBin()) {
      int row_idx = 0;
      while (row_idx < nrow) {
        auto pair = col_it.NextNonZero();
        row_idx = pair.first;
        // no more data
1626
1627
1628
        if (row_idx < 0) {
          break;
        }
1629
        ret->PushOneData(tid, row_idx, group, feature_idx, sub_feature, pair.second);
Guolin Ke's avatar
Guolin Ke committed
1630
1631
1632
1633
      }
    } else {
      for (int row_idx = 0; row_idx < nrow; ++row_idx) {
        auto val = col_it.Get(row_idx);
1634
        ret->PushOneData(tid, row_idx, group, feature_idx, sub_feature, val);
Guolin Ke's avatar
Guolin Ke committed
1635
      }
Guolin Ke's avatar
Guolin Ke committed
1636
    }
1637
    OMP_LOOP_EX_END();
Guolin Ke's avatar
Guolin Ke committed
1638
  }
1639
  OMP_THROW_EX();
Guolin Ke's avatar
Guolin Ke committed
1640
  ret->FinishLoad();
Guolin Ke's avatar
Guolin Ke committed
1641
  *out = ret.release();
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
  API_END();
}

int LGBM_DatasetCreateFromArrow(int64_t n_chunks,
                                const ArrowArray* chunks,
                                const ArrowSchema* schema,
                                const char* parameters,
                                const DatasetHandle reference,
                                DatasetHandle *out) {
  API_BEGIN();

  auto param = Config::Str2Map(parameters);
  Config config;
  config.Set(param);
  OMP_SET_NUM_THREADS(config.num_threads);

  std::unique_ptr<Dataset> ret;

  // Prepare the Arrow data
  ArrowTable table(n_chunks, chunks, schema);

  // Initialize the dataset
  if (reference == nullptr) {
    // If there is no reference dataset, we first sample indices
    auto sample_indices = CreateSampleIndices(static_cast<int32_t>(table.get_num_rows()), config);
    auto sample_count = static_cast<int>(sample_indices.size());
    std::vector<std::vector<double>> sample_values(table.get_num_columns());
    std::vector<std::vector<int>> sample_idx(table.get_num_columns());

    // Then, we obtain sample values by parallelizing across columns
    OMP_INIT_EX();
    #pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(static)
    for (int64_t j = 0; j < table.get_num_columns(); ++j) {
      OMP_LOOP_EX_BEGIN();

      // Values need to be copied from the record batches.
      sample_values[j].reserve(sample_indices.size());
      sample_idx[j].reserve(sample_indices.size());

      // The chunks are iterated over in the inner loop as columns can be treated independently.
      int last_idx = 0;
      int i = 0;
      auto it = table.get_column(j).begin<double>();
      for (auto idx : sample_indices) {
        std::advance(it, idx - last_idx);
        auto v = *it;
        if (std::fabs(v) > kZeroThreshold || std::isnan(v)) {
          sample_values[j].emplace_back(v);
          sample_idx[j].emplace_back(i);
        }
        last_idx = idx;
        i++;
      }
      OMP_LOOP_EX_END();
    }
    OMP_THROW_EX();

    // Finally, we initialize a loader from the sampled values
    DatasetLoader loader(config, nullptr, 1, nullptr);
    ret.reset(loader.ConstructFromSampleData(Vector2Ptr<double>(&sample_values).data(),
                                             Vector2Ptr<int>(&sample_idx).data(),
                                             table.get_num_columns(),
                                             VectorSize<double>(sample_values).data(),
                                             sample_count,
                                             table.get_num_rows(),
                                             table.get_num_rows()));
  } else {
    ret.reset(new Dataset(static_cast<data_size_t>(table.get_num_rows())));
    ret->CreateValid(reinterpret_cast<const Dataset*>(reference));
    if (ret->has_raw()) {
      ret->ResizeRaw(static_cast<int>(table.get_num_rows()));
    }
  }

  // After sampling and properly initializing all bins, we can add our data to the dataset. Here,
  // we parallelize across rows.
  OMP_INIT_EX();
  #pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(static)
  for (int64_t j = 0; j < table.get_num_columns(); ++j) {
    OMP_LOOP_EX_BEGIN();
    const int tid = omp_get_thread_num();
    data_size_t idx = 0;
    auto column = table.get_column(j);
    for (auto it = column.begin<double>(), end = column.end<double>(); it != end; ++it) {
      ret->PushOneValue(tid, idx++, j, *it);
    }
    OMP_LOOP_EX_END();
  }
  OMP_THROW_EX();

  ret->FinishLoad();
  *out = ret.release();
1734
  API_END();
Guolin Ke's avatar
Guolin Ke committed
1735
1736
}

Guolin Ke's avatar
Guolin Ke committed
1737
int LGBM_DatasetGetSubset(
1738
  const DatasetHandle handle,
wxchan's avatar
wxchan committed
1739
1740
1741
  const int32_t* used_row_indices,
  int32_t num_used_row_indices,
  const char* parameters,
Guolin Ke's avatar
typo  
Guolin Ke committed
1742
  DatasetHandle* out) {
wxchan's avatar
wxchan committed
1743
  API_BEGIN();
Guolin Ke's avatar
Guolin Ke committed
1744
1745
  auto param = Config::Str2Map(parameters);
  Config config;
1746
  config.Set(param);
1747
  OMP_SET_NUM_THREADS(config.num_threads);
1748
  auto full_dataset = reinterpret_cast<const Dataset*>(handle);
1749
  CHECK_GT(num_used_row_indices, 0);
1750
1751
  const int32_t lower = 0;
  const int32_t upper = full_dataset->num_data() - 1;
1752
  CheckElementsIntervalClosed(used_row_indices, lower, upper, num_used_row_indices, "Used indices of subset");
1753
1754
1755
  if (!std::is_sorted(used_row_indices, used_row_indices + num_used_row_indices)) {
    Log::Fatal("used_row_indices should be sorted in Subset");
  }
Guolin Ke's avatar
Guolin Ke committed
1756
  auto ret = std::unique_ptr<Dataset>(new Dataset(num_used_row_indices));
1757
  ret->CopyFeatureMapperFrom(full_dataset);
1758
  ret->CopySubrow(full_dataset, used_row_indices, num_used_row_indices, true);
wxchan's avatar
wxchan committed
1759
1760
1761
1762
  *out = ret.release();
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
1763
int LGBM_DatasetSetFeatureNames(
Guolin Ke's avatar
typo  
Guolin Ke committed
1764
  DatasetHandle handle,
Guolin Ke's avatar
Guolin Ke committed
1765
  const char** feature_names,
Guolin Ke's avatar
Guolin Ke committed
1766
  int num_feature_names) {
Guolin Ke's avatar
Guolin Ke committed
1767
1768
1769
  API_BEGIN();
  auto dataset = reinterpret_cast<Dataset*>(handle);
  std::vector<std::string> feature_names_str;
Guolin Ke's avatar
Guolin Ke committed
1770
  for (int i = 0; i < num_feature_names; ++i) {
Guolin Ke's avatar
Guolin Ke committed
1771
1772
1773
1774
1775
1776
    feature_names_str.emplace_back(feature_names[i]);
  }
  dataset->set_feature_names(feature_names_str);
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
1777
int LGBM_DatasetGetFeatureNames(
1778
1779
1780
1781
1782
1783
    DatasetHandle handle,
    const int len,
    int* num_feature_names,
    const size_t buffer_len,
    size_t* out_buffer_len,
    char** feature_names) {
1784
  API_BEGIN();
1785
  *out_buffer_len = 0;
1786
1787
  auto dataset = reinterpret_cast<Dataset*>(handle);
  auto inside_feature_name = dataset->feature_names();
Guolin Ke's avatar
Guolin Ke committed
1788
1789
  *num_feature_names = static_cast<int>(inside_feature_name.size());
  for (int i = 0; i < *num_feature_names; ++i) {
1790
1791
1792
1793
1794
    if (i < len) {
      std::memcpy(feature_names[i], inside_feature_name[i].c_str(), std::min(inside_feature_name[i].size() + 1, buffer_len));
      feature_names[i][buffer_len - 1] = '\0';
    }
    *out_buffer_len = std::max(inside_feature_name[i].size() + 1, *out_buffer_len);
1795
1796
1797
1798
  }
  API_END();
}

1799
1800
1801
#ifdef _MSC_VER
  #pragma warning(disable : 4702)
#endif
Guolin Ke's avatar
Guolin Ke committed
1802
int LGBM_DatasetFree(DatasetHandle handle) {
1803
  API_BEGIN();
Guolin Ke's avatar
Guolin Ke committed
1804
  delete reinterpret_cast<Dataset*>(handle);
1805
  API_END();
1806
1807
}

Guolin Ke's avatar
Guolin Ke committed
1808
int LGBM_DatasetSaveBinary(DatasetHandle handle,
1809
                           const char* filename) {
1810
  API_BEGIN();
Guolin Ke's avatar
Guolin Ke committed
1811
1812
  auto dataset = reinterpret_cast<Dataset*>(handle);
  dataset->SaveBinaryFile(filename);
1813
  API_END();
1814
1815
}

1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
int LGBM_DatasetSerializeReferenceToBinary(DatasetHandle handle,
                                           ByteBufferHandle* out,
                                           int32_t* out_len) {
  API_BEGIN();
  auto dataset = reinterpret_cast<Dataset*>(handle);
  std::unique_ptr<LightGBM::ByteBuffer> ret;
  ret.reset(new LightGBM::ByteBuffer());
  dataset->SerializeReference(ret.get());
  *out_len = static_cast<int32_t>(ret->GetSize());
  *out = ret.release();
  API_END();
}

1829
1830
1831
1832
1833
1834
1835
1836
int LGBM_DatasetDumpText(DatasetHandle handle,
                         const char* filename) {
  API_BEGIN();
  auto dataset = reinterpret_cast<Dataset*>(handle);
  dataset->DumpTextFile(filename);
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
1837
int LGBM_DatasetSetField(DatasetHandle handle,
1838
1839
1840
1841
                         const char* field_name,
                         const void* field_data,
                         int num_element,
                         int type) {
1842
  API_BEGIN();
Guolin Ke's avatar
Guolin Ke committed
1843
  auto dataset = reinterpret_cast<Dataset*>(handle);
1844
  bool is_success = false;
Guolin Ke's avatar
Guolin Ke committed
1845
  if (type == C_API_DTYPE_FLOAT32) {
Guolin Ke's avatar
Guolin Ke committed
1846
    is_success = dataset->SetFloatField(field_name, reinterpret_cast<const float*>(field_data), static_cast<int32_t>(num_element));
Guolin Ke's avatar
Guolin Ke committed
1847
  } else if (type == C_API_DTYPE_INT32) {
Guolin Ke's avatar
Guolin Ke committed
1848
    is_success = dataset->SetIntField(field_name, reinterpret_cast<const int*>(field_data), static_cast<int32_t>(num_element));
Guolin Ke's avatar
Guolin Ke committed
1849
1850
  } else if (type == C_API_DTYPE_FLOAT64) {
    is_success = dataset->SetDoubleField(field_name, reinterpret_cast<const double*>(field_data), static_cast<int32_t>(num_element));
1851
  }
1852
1853
1854
  if (!is_success) {
    Log::Fatal("Input data type error or field not found");
  }
1855
  API_END();
1856
1857
}

1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
int LGBM_DatasetSetFieldFromArrow(DatasetHandle handle,
                                  const char* field_name,
                                  int64_t n_chunks,
                                  const ArrowArray* chunks,
                                  const ArrowSchema* schema) {
  API_BEGIN();
  auto dataset = reinterpret_cast<Dataset*>(handle);
  ArrowChunkedArray ca(n_chunks, chunks, schema);
  auto is_success = dataset->SetFieldFromArrow(field_name, ca);
  if (!is_success) {
    Log::Fatal("Input field is not supported");
  }
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
1873
int LGBM_DatasetGetField(DatasetHandle handle,
1874
1875
1876
1877
                         const char* field_name,
                         int* out_len,
                         const void** out_ptr,
                         int* out_type) {
1878
  API_BEGIN();
Guolin Ke's avatar
Guolin Ke committed
1879
  auto dataset = reinterpret_cast<Dataset*>(handle);
1880
  bool is_success = false;
Guolin Ke's avatar
Guolin Ke committed
1881
  if (dataset->GetFloatField(field_name, out_len, reinterpret_cast<const float**>(out_ptr))) {
Guolin Ke's avatar
Guolin Ke committed
1882
    *out_type = C_API_DTYPE_FLOAT32;
1883
    is_success = true;
Guolin Ke's avatar
Guolin Ke committed
1884
  } else if (dataset->GetIntField(field_name, out_len, reinterpret_cast<const int**>(out_ptr))) {
Guolin Ke's avatar
Guolin Ke committed
1885
    *out_type = C_API_DTYPE_INT32;
1886
    is_success = true;
Guolin Ke's avatar
Guolin Ke committed
1887
1888
1889
  } else if (dataset->GetDoubleField(field_name, out_len, reinterpret_cast<const double**>(out_ptr))) {
    *out_type = C_API_DTYPE_FLOAT64;
    is_success = true;
Nikita Titov's avatar
Nikita Titov committed
1890
  }
1891
1892
1893
1894
1895
1896
  if (!is_success) {
    Log::Fatal("Field not found");
  }
  if (*out_ptr == nullptr) {
    *out_len = 0;
  }
1897
  API_END();
1898
1899
}

1900
int LGBM_DatasetUpdateParamChecking(const char* old_parameters, const char* new_parameters) {
1901
  API_BEGIN();
1902
1903
1904
1905
1906
  auto old_param = Config::Str2Map(old_parameters);
  Config old_config;
  old_config.Set(old_param);
  auto new_param = Config::Str2Map(new_parameters);
  Booster::CheckDatasetResetConfig(old_config, new_param);
1907
1908
1909
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
1910
int LGBM_DatasetGetNumData(DatasetHandle handle,
1911
                           int* out) {
1912
  API_BEGIN();
Guolin Ke's avatar
Guolin Ke committed
1913
1914
  auto dataset = reinterpret_cast<Dataset*>(handle);
  *out = dataset->num_data();
1915
  API_END();
1916
1917
}

Guolin Ke's avatar
Guolin Ke committed
1918
int LGBM_DatasetGetNumFeature(DatasetHandle handle,
1919
                              int* out) {
1920
  API_BEGIN();
Guolin Ke's avatar
Guolin Ke committed
1921
1922
  auto dataset = reinterpret_cast<Dataset*>(handle);
  *out = dataset->num_total_features();
1923
  API_END();
Guolin Ke's avatar
Guolin Ke committed
1924
}
1925

1926
1927
1928
1929
1930
int LGBM_DatasetGetFeatureNumBin(DatasetHandle handle,
                                 int feature,
                                 int* out) {
  API_BEGIN();
  auto dataset = reinterpret_cast<Dataset*>(handle);
1931
1932
1933
1934
1935
  int num_features = dataset->num_total_features();
  if (feature < 0 || feature >= num_features) {
    Log::Fatal("Tried to retrieve number of bins for feature index %d, "
               "but the valid feature indices are [0, %d].", feature, num_features - 1);
  }
1936
1937
1938
1939
1940
1941
1942
1943
1944
  int inner_idx = dataset->InnerFeatureIndex(feature);
  if (inner_idx >= 0) {
    *out = dataset->FeatureNumBin(inner_idx);
  } else {
    *out = 0;
  }
  API_END();
}

1945
1946
1947
1948
1949
int LGBM_DatasetAddFeaturesFrom(DatasetHandle target,
                                DatasetHandle source) {
  API_BEGIN();
  auto target_d = reinterpret_cast<Dataset*>(target);
  auto source_d = reinterpret_cast<Dataset*>(source);
1950
  target_d->AddFeaturesFrom(source_d);
1951
1952
1953
  API_END();
}

1954
1955
// ---- start of booster

Guolin Ke's avatar
Guolin Ke committed
1956
int LGBM_BoosterCreate(const DatasetHandle train_data,
1957
1958
                       const char* parameters,
                       BoosterHandle* out) {
1959
  API_BEGIN();
Guolin Ke's avatar
Guolin Ke committed
1960
  const Dataset* p_train_data = reinterpret_cast<const Dataset*>(train_data);
wxchan's avatar
wxchan committed
1961
1962
  auto ret = std::unique_ptr<Booster>(new Booster(p_train_data, parameters));
  *out = ret.release();
1963
  API_END();
1964
1965
}

Guolin Ke's avatar
Guolin Ke committed
1966
int LGBM_BoosterCreateFromModelfile(
1967
  const char* filename,
Guolin Ke's avatar
Guolin Ke committed
1968
  int* out_num_iterations,
1969
  BoosterHandle* out) {
1970
  API_BEGIN();
wxchan's avatar
wxchan committed
1971
  auto ret = std::unique_ptr<Booster>(new Booster(filename));
Guolin Ke's avatar
Guolin Ke committed
1972
  *out_num_iterations = ret->GetBoosting()->GetCurrentIteration();
wxchan's avatar
wxchan committed
1973
  *out = ret.release();
1974
  API_END();
1975
1976
}

Guolin Ke's avatar
Guolin Ke committed
1977
int LGBM_BoosterLoadModelFromString(
1978
1979
1980
1981
  const char* model_str,
  int* out_num_iterations,
  BoosterHandle* out) {
  API_BEGIN();
wxchan's avatar
wxchan committed
1982
  auto ret = std::unique_ptr<Booster>(new Booster(nullptr));
1983
1984
1985
1986
1987
1988
  ret->LoadModelFromString(model_str);
  *out_num_iterations = ret->GetBoosting()->GetCurrentIteration();
  *out = ret.release();
  API_END();
}

1989
1990
1991
1992
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
int LGBM_BoosterGetLoadedParam(
  BoosterHandle handle,
  int64_t buffer_len,
  int64_t* out_len,
  char* out_str) {
  API_BEGIN();
  Booster* ref_booster = reinterpret_cast<Booster*>(handle);
  std::string params = ref_booster->GetBoosting()->GetLoadedParam();
  // Required buffer size includes the terminating NUL so the caller can
  // re-invoke with a large-enough buffer.
  *out_len = static_cast<int64_t>(params.size()) + 1;
  if (*out_len <= buffer_len) {
    // std::copy_n (<algorithm>, already included at the top of this file)
    // avoids relying on a transitive <cstring> include for std::memcpy;
    // c_str() guarantees the trailing NUL that *out_len accounts for.
    std::copy_n(params.c_str(), *out_len, out_str);
  }
  API_END();
}

2004
2005
2006
#ifdef _MSC_VER
  #pragma warning(disable : 4702)
#endif
Guolin Ke's avatar
Guolin Ke committed
2007
int LGBM_BoosterFree(BoosterHandle handle) {
  API_BEGIN();
  // Destroy the Booster previously handed out through a create call.
  auto* booster = reinterpret_cast<Booster*>(handle);
  delete booster;
  API_END();
}

2013
int LGBM_BoosterShuffleModels(BoosterHandle handle, int start_iter, int end_iter) {
  API_BEGIN();
  // Randomly permute the trees in iterations [start_iter, end_iter).
  auto* booster = reinterpret_cast<Booster*>(handle);
  booster->ShuffleModels(start_iter, end_iter);
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
2020
int LGBM_BoosterMerge(BoosterHandle handle,
                      BoosterHandle other_handle) {
  API_BEGIN();
  // Merge the trees of `other_handle` into `handle`.
  auto* dst = reinterpret_cast<Booster*>(handle);
  auto* src = reinterpret_cast<Booster*>(other_handle);
  dst->MergeFrom(src);
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
2029
int LGBM_BoosterAddValidData(BoosterHandle handle,
2030
                             const DatasetHandle valid_data) {
wxchan's avatar
wxchan committed
2031
2032
2033
2034
2035
2036
2037
  API_BEGIN();
  Booster* ref_booster = reinterpret_cast<Booster*>(handle);
  const Dataset* p_dataset = reinterpret_cast<const Dataset*>(valid_data);
  ref_booster->AddValidData(p_dataset);
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
2038
int LGBM_BoosterResetTrainingData(BoosterHandle handle,
2039
                                  const DatasetHandle train_data) {
wxchan's avatar
wxchan committed
2040
2041
2042
2043
2044
2045
2046
  API_BEGIN();
  Booster* ref_booster = reinterpret_cast<Booster*>(handle);
  const Dataset* p_dataset = reinterpret_cast<const Dataset*>(train_data);
  ref_booster->ResetTrainingData(p_dataset);
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
2047
int LGBM_BoosterResetParameter(BoosterHandle handle, const char* parameters) {
  API_BEGIN();
  // Re-apply a (possibly changed) parameter string to the booster config.
  auto* booster = reinterpret_cast<Booster*>(handle);
  booster->ResetConfig(parameters);
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
2054
int LGBM_BoosterGetNumClasses(BoosterHandle handle, int* out_len) {
  API_BEGIN();
  // Number of classes of the underlying boosting model (1 for non-multiclass).
  auto* booster = reinterpret_cast<Booster*>(handle);
  *out_len = booster->GetBoosting()->NumberOfClasses();
  API_END();
}

2061
int LGBM_BoosterGetLinear(BoosterHandle handle, int* out) {
  API_BEGIN();
  // Write 1 if the model uses linear trees, 0 otherwise.
  auto* booster = reinterpret_cast<Booster*>(handle);
  *out = booster->GetBoosting()->IsLinear() ? 1 : 0;
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
2072
2073
2074
2075
2076
2077
2078
int LGBM_BoosterRefit(BoosterHandle handle, const int32_t* leaf_preds, int32_t nrow, int32_t ncol) {
  API_BEGIN();
  // Refit tree-leaf outputs from an nrow x ncol matrix of leaf indices.
  auto* booster = reinterpret_cast<Booster*>(handle);
  booster->Refit(leaf_preds, nrow, ncol);
  API_END();
}

2079
int LGBM_BoosterUpdateOneIter(BoosterHandle handle, int* produced_empty_tree) {
  API_BEGIN();
  // Run one boosting iteration with the built-in objective; the flag tells
  // the caller whether the new iteration added only an empty (no-split) tree.
  auto* booster = reinterpret_cast<Booster*>(handle);
  *produced_empty_tree = booster->TrainOneIter() ? 1 : 0;
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
2090
int LGBM_BoosterUpdateOneIterCustom(BoosterHandle handle,
2091
2092
                                    const float* grad,
                                    const float* hess,
2093
                                    int* produced_empty_tree) {
2094
  API_BEGIN();
2095
  #ifdef SCORE_T_USE_DOUBLE
2096
2097
2098
  (void) handle;       // UNUSED VARIABLE
  (void) grad;         // UNUSED VARIABLE
  (void) hess;         // UNUSED VARIABLE
2099
  (void) produced_empty_tree;  // UNUSED VARIABLE
2100
  Log::Fatal("Don't support custom loss function when SCORE_T_USE_DOUBLE is enabled");
2101
  #else
2102
  Booster* ref_booster = reinterpret_cast<Booster*>(handle);
2103
  if (ref_booster->TrainOneIter(grad, hess)) {
2104
    *produced_empty_tree = 1;
2105
  } else {
2106
    *produced_empty_tree = 0;
2107
  }
2108
  #endif
2109
  API_END();
2110
2111
}

Guolin Ke's avatar
Guolin Ke committed
2112
int LGBM_BoosterRollbackOneIter(BoosterHandle handle) {
  API_BEGIN();
  // Undo the most recent boosting iteration.
  auto* booster = reinterpret_cast<Booster*>(handle);
  booster->RollbackOneIter();
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
2119
int LGBM_BoosterGetCurrentIteration(BoosterHandle handle, int* out_iteration) {
  API_BEGIN();
  // Current iteration count of the boosting model.
  auto* booster = reinterpret_cast<Booster*>(handle);
  *out_iteration = booster->GetBoosting()->GetCurrentIteration();
  API_END();
}
Guolin Ke's avatar
Guolin Ke committed
2125

2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
int LGBM_BoosterNumModelPerIteration(BoosterHandle handle, int* out_tree_per_iteration) {
  API_BEGIN();
  // Trees built per boosting iteration (== num_class for multiclass).
  auto* booster = reinterpret_cast<Booster*>(handle);
  *out_tree_per_iteration = booster->GetBoosting()->NumModelPerIteration();
  API_END();
}

int LGBM_BoosterNumberOfTotalModel(BoosterHandle handle, int* out_models) {
  API_BEGIN();
  // Total number of trees across all iterations.
  auto* booster = reinterpret_cast<Booster*>(handle);
  *out_models = booster->GetBoosting()->NumberOfTotalModel();
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
2140
int LGBM_BoosterGetEvalCounts(BoosterHandle handle, int* out_len) {
  API_BEGIN();
  // Number of evaluation metrics configured on this booster.
  auto* booster = reinterpret_cast<Booster*>(handle);
  *out_len = booster->GetEvalCounts();
  API_END();
}

2147
2148
2149
2150
2151
2152
int LGBM_BoosterGetEvalNames(BoosterHandle handle,
                             const int len,
                             int* out_len,
                             const size_t buffer_len,
                             size_t* out_buffer_len,
                             char** out_strs) {
  API_BEGIN();
  // Copy up to `len` metric names into caller buffers of size `buffer_len`;
  // the booster reports the actual count and the largest required size.
  auto* booster = reinterpret_cast<Booster*>(handle);
  *out_len = booster->GetEvalNames(out_strs, len, buffer_len, out_buffer_len);
  API_END();
}

2159
2160
2161
2162
2163
2164
int LGBM_BoosterGetFeatureNames(BoosterHandle handle,
                                const int len,
                                int* out_len,
                                const size_t buffer_len,
                                size_t* out_buffer_len,
                                char** out_strs) {
  API_BEGIN();
  // Copy up to `len` feature names into caller buffers of size `buffer_len`;
  // the booster reports the actual count and the largest required size.
  auto* booster = reinterpret_cast<Booster*>(handle);
  *out_len = booster->GetFeatureNames(out_strs, len, buffer_len, out_buffer_len);
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
2171
int LGBM_BoosterGetNumFeature(BoosterHandle handle, int* out_len) {
  API_BEGIN();
  // Feature count = highest feature index seen by the model, plus one.
  auto* booster = reinterpret_cast<Booster*>(handle);
  *out_len = booster->GetBoosting()->MaxFeatureIdx() + 1;
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
2178
int LGBM_BoosterGetEval(BoosterHandle handle,
                        int data_idx,
                        int* out_len,
                        double* out_results) {
  API_BEGIN();
  // Evaluate all configured metrics on dataset `data_idx`
  // (0 = train, >=1 = validation sets in registration order).
  auto* booster = reinterpret_cast<Booster*>(handle);
  auto* boosting = booster->GetBoosting();
  const auto scores = boosting->GetEvalAt(data_idx);
  *out_len = static_cast<int>(scores.size());
  for (size_t idx = 0; idx < scores.size(); ++idx) {
    out_results[idx] = static_cast<double>(scores[idx]);
  }
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
2193
int LGBM_BoosterGetNumPredict(BoosterHandle handle,
2194
2195
                              int data_idx,
                              int64_t* out_len) {
Guolin Ke's avatar
Guolin Ke committed
2196
2197
2198
2199
2200
2201
  API_BEGIN();
  auto boosting = reinterpret_cast<Booster*>(handle)->GetBoosting();
  *out_len = boosting->GetNumPredictAt(data_idx);
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
2202
int LGBM_BoosterGetPredict(BoosterHandle handle,
2203
2204
2205
                           int data_idx,
                           int64_t* out_len,
                           double* out_result) {
2206
  API_BEGIN();
Guolin Ke's avatar
Guolin Ke committed
2207
  Booster* ref_booster = reinterpret_cast<Booster*>(handle);
2208
  ref_booster->GetPredictAt(data_idx, out_result, out_len);
2209
  API_END();
Guolin Ke's avatar
Guolin Ke committed
2210
2211
}

Guolin Ke's avatar
Guolin Ke committed
2212
int LGBM_BoosterPredictForFile(BoosterHandle handle,
2213
2214
2215
                               const char* data_filename,
                               int data_has_header,
                               int predict_type,
2216
                               int start_iteration,
2217
                               int num_iteration,
2218
                               const char* parameter,
2219
                               const char* result_filename) {
2220
  API_BEGIN();
Guolin Ke's avatar
Guolin Ke committed
2221
2222
  auto param = Config::Str2Map(parameter);
  Config config;
Guolin Ke's avatar
Guolin Ke committed
2223
  config.Set(param);
2224
  OMP_SET_NUM_THREADS(config.num_threads);
Guolin Ke's avatar
Guolin Ke committed
2225
  Booster* ref_booster = reinterpret_cast<Booster*>(handle);
2226
  ref_booster->Predict(start_iteration, num_iteration, predict_type, data_filename, data_has_header,
Guolin Ke's avatar
Guolin Ke committed
2227
                       config, result_filename);
2228
  API_END();
2229
2230
}

Guolin Ke's avatar
Guolin Ke committed
2231
int LGBM_BoosterCalcNumPredict(BoosterHandle handle,
                               int num_row,
                               int predict_type,
                               int start_iteration,
                               int num_iteration,
                               int64_t* out_len) {
  API_BEGIN();
  // Output size = rows * per-row prediction width, which depends on the
  // prediction type (normal, leaf index, or feature contributions).
  auto* booster = reinterpret_cast<Booster*>(handle);
  const int64_t per_row = booster->GetBoosting()->NumPredictOneRow(
      start_iteration, num_iteration,
      predict_type == C_API_PREDICT_LEAF_INDEX,
      predict_type == C_API_PREDICT_CONTRIB);
  *out_len = static_cast<int64_t>(num_row) * per_row;
  API_END();
}

2244
2245
2246
2247
2248
2249
// Naming: In future versions of LightGBM, public API named around `FastConfig` should be made named around
// `SingleRowPredictor`, because it is specific to single row prediction, and doesn't actually hold only config.
// For now this is kept as `FastConfig` for backwards compatibility.
// At the same time, one should consider removing the old non-fast single row public API that stores its Predictor
// in the Booster, because that will enable removing these Predictors from the Booster, and associated initialization
// code.
2250
2251
int LGBM_FastConfigFree(FastConfigHandle fastConfig) {
  API_BEGIN();
  // The "FastConfig" handle actually owns a SingleRowPredictor; release it.
  auto* predictor = reinterpret_cast<SingleRowPredictor*>(fastConfig);
  delete predictor;
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
2256
int LGBM_BoosterPredictForCSR(BoosterHandle handle,
2257
2258
2259
2260
2261
2262
2263
                              const void* indptr,
                              int indptr_type,
                              const int32_t* indices,
                              const void* data,
                              int data_type,
                              int64_t nindptr,
                              int64_t nelem,
2264
                              int64_t num_col,
2265
                              int predict_type,
2266
                              int start_iteration,
2267
                              int num_iteration,
2268
                              const char* parameter,
2269
2270
                              int64_t* out_len,
                              double* out_result) {
2271
  API_BEGIN();
2272
2273
2274
2275
2276
  if (num_col <= 0) {
    Log::Fatal("The number of columns should be greater than zero.");
  } else if (num_col >= INT32_MAX) {
    Log::Fatal("The number of columns should be smaller than INT32_MAX.");
  }
Guolin Ke's avatar
Guolin Ke committed
2277
2278
  auto param = Config::Str2Map(parameter);
  Config config;
Guolin Ke's avatar
Guolin Ke committed
2279
  config.Set(param);
2280
  OMP_SET_NUM_THREADS(config.num_threads);
Guolin Ke's avatar
Guolin Ke committed
2281
  Booster* ref_booster = reinterpret_cast<Booster*>(handle);
2282
  auto get_row_fun = RowFunctionFromCSR<int>(indptr, indptr_type, indices, data, data_type, nindptr, nelem);
Guolin Ke's avatar
Guolin Ke committed
2283
  int nrow = static_cast<int>(nindptr - 1);
2284
  ref_booster->Predict(start_iteration, num_iteration, predict_type, nrow, static_cast<int>(num_col), get_row_fun,
Guolin Ke's avatar
Guolin Ke committed
2285
                       config, out_result, out_len);
2286
  API_END();
Guolin Ke's avatar
Guolin Ke committed
2287
}
2288

2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
int LGBM_BoosterPredictSparseOutput(BoosterHandle handle,
                                    const void* indptr,
                                    int indptr_type,
                                    const int32_t* indices,
                                    const void* data,
                                    int data_type,
                                    int64_t nindptr,
                                    int64_t nelem,
                                    int64_t num_col_or_row,
                                    int predict_type,
                                    int start_iteration,
                                    int num_iteration,
                                    const char* parameter,
                                    int matrix_type,
                                    int64_t* out_len,
                                    void** out_indptr,
                                    int32_t** out_indices,
                                    void** out_data) {
  API_BEGIN();
  // Sparse-output prediction (e.g. SHAP contributions) for CSR or CSC input.
  // The three out_* arrays are allocated here and must be released with
  // LGBM_BoosterFreePredictSparse.
  auto* booster = reinterpret_cast<Booster*>(handle);
  const auto cfg_map = Config::Str2Map(parameter);
  Config cfg;
  cfg.Set(cfg_map);
  OMP_SET_NUM_THREADS(cfg.num_threads);
  if (matrix_type == C_API_MATRIX_TYPE_CSR) {
    // For CSR, num_col_or_row is the column count and is narrowed to int below.
    if (num_col_or_row <= 0) {
      Log::Fatal("The number of columns should be greater than zero.");
    } else if (num_col_or_row >= INT32_MAX) {
      Log::Fatal("The number of columns should be smaller than INT32_MAX.");
    }
    auto row_fn = RowFunctionFromCSR<int64_t>(indptr, indptr_type, indices, data, data_type, nindptr, nelem);
    const int64_t num_rows = nindptr - 1;
    booster->PredictSparseCSR(start_iteration, num_iteration, predict_type, num_rows, static_cast<int>(num_col_or_row), row_fn,
                              cfg, out_len, out_indptr, indptr_type, out_indices, out_data, data_type);
  } else if (matrix_type == C_API_MATRIX_TYPE_CSC) {
    // For CSC, build one set of column iterators per OpenMP thread so the
    // row-extraction lambda can be called concurrently.
    const int num_threads = OMP_NUM_THREADS();
    const int ncol = static_cast<int>(nindptr - 1);
    std::vector<std::vector<CSC_RowIterator>> col_iters(num_threads, std::vector<CSC_RowIterator>());
    for (int tid = 0; tid < num_threads; ++tid) {
      for (int col = 0; col < ncol; ++col) {
        col_iters[tid].emplace_back(indptr, indptr_type, indices, data, data_type, nindptr, nelem, col);
      }
    }
    std::function<std::vector<std::pair<int, double>>(int64_t row_idx)> row_fn =
      [&col_iters, ncol](int64_t row_idx) {
      std::vector<std::pair<int, double>> row;
      row.reserve(ncol);
      const int tid = omp_get_thread_num();
      for (int col = 0; col < ncol; ++col) {
        const auto val = col_iters[tid][col].Get(static_cast<int>(row_idx));
        // Keep only non-zero entries (NaN is treated as a present value).
        if (std::fabs(val) > kZeroThreshold || std::isnan(val)) {
          row.emplace_back(col, val);
        }
      }
      return row;
    };
    booster->PredictSparseCSC(start_iteration, num_iteration, predict_type, num_col_or_row, ncol, row_fn, cfg,
                              out_len, out_indptr, indptr_type, out_indices, out_data, data_type);
  } else {
    Log::Fatal("Unknown matrix type in LGBM_BoosterPredictSparseOutput");
  }
  API_END();
}

int LGBM_BoosterFreePredictSparse(void* indptr, int32_t* indices, void* data, int indptr_type, int data_type) {
  API_BEGIN();
  if (indptr_type == C_API_DTYPE_INT32) {
2356
    delete[] reinterpret_cast<int32_t*>(indptr);
2357
  } else if (indptr_type == C_API_DTYPE_INT64) {
2358
    delete[] reinterpret_cast<int64_t*>(indptr);
2359
2360
2361
  } else {
    Log::Fatal("Unknown indptr type in LGBM_BoosterFreePredictSparse");
  }
2362
  delete[] indices;
2363
  if (data_type == C_API_DTYPE_FLOAT32) {
2364
    delete[] reinterpret_cast<float*>(data);
2365
  } else if (data_type == C_API_DTYPE_FLOAT64) {
2366
    delete[] reinterpret_cast<double*>(data);
2367
2368
2369
2370
2371
2372
  } else {
    Log::Fatal("Unknown data type in LGBM_BoosterFreePredictSparse");
  }
  API_END();
}

2373
int LGBM_BoosterPredictForCSRSingleRow(BoosterHandle handle,
2374
2375
2376
2377
2378
2379
2380
                                       const void* indptr,
                                       int indptr_type,
                                       const int32_t* indices,
                                       const void* data,
                                       int data_type,
                                       int64_t nindptr,
                                       int64_t nelem,
2381
                                       int64_t num_col,
2382
                                       int predict_type,
2383
                                       int start_iteration,
2384
2385
2386
2387
                                       int num_iteration,
                                       const char* parameter,
                                       int64_t* out_len,
                                       double* out_result) {
2388
  API_BEGIN();
2389
2390
2391
2392
2393
  if (num_col <= 0) {
    Log::Fatal("The number of columns should be greater than zero.");
  } else if (num_col >= INT32_MAX) {
    Log::Fatal("The number of columns should be smaller than INT32_MAX.");
  }
2394
2395
2396
  auto param = Config::Str2Map(parameter);
  Config config;
  config.Set(param);
2397
  OMP_SET_NUM_THREADS(config.num_threads);
2398
  Booster* ref_booster = reinterpret_cast<Booster*>(handle);
2399
  auto get_row_fun = RowFunctionFromCSR<int>(indptr, indptr_type, indices, data, data_type, nindptr, nelem);
2400
  ref_booster->SetSingleRowPredictorInner(start_iteration, num_iteration, predict_type, config);
2401
  ref_booster->PredictSingleRow(predict_type, static_cast<int32_t>(num_col), get_row_fun, config, out_result, out_len);
2402
2403
2404
  API_END();
}

2405
int LGBM_BoosterPredictForCSRSingleRowFastInit(BoosterHandle handle,
                                               const int predict_type,
                                               const int start_iteration,
                                               const int num_iteration,
                                               const int data_type,
                                               const int64_t num_col,
                                               const char* parameter,
                                               FastConfigHandle *out_fastConfig) {
  API_BEGIN();
  // Pre-build a SingleRowPredictor so repeated single-row CSR predictions
  // skip per-call setup; the handle must be freed with LGBM_FastConfigFree.
  if (num_col <= 0) {
    Log::Fatal("The number of columns should be greater than zero.");
  } else if (num_col >= INT32_MAX) {
    Log::Fatal("The number of columns should be smaller than INT32_MAX.");
  }

  Booster* ref_booster = reinterpret_cast<Booster*>(handle);

  // BUGFIX: arguments are passed in declaration order (predict_type first),
  // matching LGBM_BoosterPredictForMatSingleRowFastInit. Previously
  // start_iteration was passed in the predict_type position (all ints, so the
  // compiler could not catch the swap), silently changing the prediction mode.
  std::unique_ptr<SingleRowPredictor> single_row_predictor =
    ref_booster->InitSingleRowPredictor(predict_type, start_iteration, num_iteration, data_type, static_cast<int32_t>(num_col), parameter);

  OMP_SET_NUM_THREADS(single_row_predictor->config.num_threads);

  *out_fastConfig = single_row_predictor.release();
  API_END();
}

int LGBM_BoosterPredictForCSRSingleRowFast(FastConfigHandle fastConfig_handle,
                                           const void* indptr,
2433
                                           const int indptr_type,
2434
2435
                                           const int32_t* indices,
                                           const void* data,
2436
2437
                                           const int64_t nindptr,
                                           const int64_t nelem,
2438
2439
2440
                                           int64_t* out_len,
                                           double* out_result) {
  API_BEGIN();
2441
2442
2443
  SingleRowPredictor *single_row_predictor = reinterpret_cast<SingleRowPredictor*>(fastConfig_handle);
  auto get_row_fun = RowFunctionFromCSR<int>(indptr, indptr_type, indices, data, single_row_predictor->data_type, nindptr, nelem);
  single_row_predictor->Predict(get_row_fun, out_result, out_len);
2444
2445
2446
  API_END();
}

2447

Guolin Ke's avatar
Guolin Ke committed
2448
int LGBM_BoosterPredictForCSC(BoosterHandle handle,
2449
2450
2451
2452
2453
2454
2455
2456
2457
                              const void* col_ptr,
                              int col_ptr_type,
                              const int32_t* indices,
                              const void* data,
                              int data_type,
                              int64_t ncol_ptr,
                              int64_t nelem,
                              int64_t num_row,
                              int predict_type,
2458
                              int start_iteration,
2459
                              int num_iteration,
2460
                              const char* parameter,
2461
2462
                              int64_t* out_len,
                              double* out_result) {
Guolin Ke's avatar
Guolin Ke committed
2463
2464
  API_BEGIN();
  Booster* ref_booster = reinterpret_cast<Booster*>(handle);
Guolin Ke's avatar
Guolin Ke committed
2465
2466
  auto param = Config::Str2Map(parameter);
  Config config;
Guolin Ke's avatar
Guolin Ke committed
2467
  config.Set(param);
2468
  OMP_SET_NUM_THREADS(config.num_threads);
2469
  int num_threads = OMP_NUM_THREADS();
Guolin Ke's avatar
Guolin Ke committed
2470
  int ncol = static_cast<int>(ncol_ptr - 1);
Guolin Ke's avatar
Guolin Ke committed
2471
2472
2473
2474
2475
  std::vector<std::vector<CSC_RowIterator>> iterators(num_threads, std::vector<CSC_RowIterator>());
  for (int i = 0; i < num_threads; ++i) {
    for (int j = 0; j < ncol; ++j) {
      iterators[i].emplace_back(col_ptr, col_ptr_type, indices, data, data_type, ncol_ptr, nelem, j);
    }
Guolin Ke's avatar
Guolin Ke committed
2476
2477
  }
  std::function<std::vector<std::pair<int, double>>(int row_idx)> get_row_fun =
Guolin Ke's avatar
Guolin Ke committed
2478
2479
2480
2481
2482
2483
2484
2485
2486
2487
2488
2489
      [&iterators, ncol](int i) {
        std::vector<std::pair<int, double>> one_row;
        one_row.reserve(ncol);
        const int tid = omp_get_thread_num();
        for (int j = 0; j < ncol; ++j) {
          auto val = iterators[tid][j].Get(i);
          if (std::fabs(val) > kZeroThreshold || std::isnan(val)) {
            one_row.emplace_back(j, val);
          }
        }
        return one_row;
      };
2490
  ref_booster->Predict(start_iteration, num_iteration, predict_type, static_cast<int>(num_row), ncol, get_row_fun, config,
cbecker's avatar
cbecker committed
2491
                       out_result, out_len);
Guolin Ke's avatar
Guolin Ke committed
2492
2493
2494
  API_END();
}

2495
2496
2497
2498
2499
2500
2501
2502
2503
2504
2505
2506
2507
2508
2509
2510
2511
2512
2513
2514
2515
int LGBM_BoosterValidateFeatureNames(BoosterHandle handle,
                                     const char** data_names,
                                     int data_num_features) {
  API_BEGIN();
  int booster_num_features;
  size_t out_buffer_len;
  // First call only queries the feature count and the required buffer size
  // (len == 0, out_strs == nullptr).
  LGBM_BoosterGetFeatureNames(handle, 0, &booster_num_features, 0, &out_buffer_len, nullptr);
  if (booster_num_features != data_num_features) {
    Log::Fatal("Model was trained on %d features, but got %d input features to predict.", booster_num_features, data_num_features);
  }
  // Second call fetches the actual names into locally-owned buffers.
  std::vector<std::vector<char>> tmp_names(booster_num_features, std::vector<char>(out_buffer_len));
  std::vector<char*> booster_names = Vector2Ptr(&tmp_names);
  LGBM_BoosterGetFeatureNames(handle, data_num_features, &booster_num_features, out_buffer_len, &out_buffer_len, booster_names.data());
  for (int i = 0; i < booster_num_features; ++i) {
    // std::string comparison (<string> is included) instead of strcmp, which
    // relied on a transitive <cstring> include not present in this file.
    if (std::string(booster_names[i]) != data_names[i]) {
      Log::Fatal("Expected '%s' at position %d but found '%s'", booster_names[i], i, data_names[i]);
    }
  }
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
2516
int LGBM_BoosterPredictForMat(BoosterHandle handle,
2517
2518
2519
2520
2521
2522
                              const void* data,
                              int data_type,
                              int32_t nrow,
                              int32_t ncol,
                              int is_row_major,
                              int predict_type,
2523
                              int start_iteration,
2524
                              int num_iteration,
2525
                              const char* parameter,
2526
2527
                              int64_t* out_len,
                              double* out_result) {
2528
  API_BEGIN();
Guolin Ke's avatar
Guolin Ke committed
2529
2530
  auto param = Config::Str2Map(parameter);
  Config config;
Guolin Ke's avatar
Guolin Ke committed
2531
  config.Set(param);
2532
  OMP_SET_NUM_THREADS(config.num_threads);
Guolin Ke's avatar
Guolin Ke committed
2533
  Booster* ref_booster = reinterpret_cast<Booster*>(handle);
2534
  auto get_row_fun = RowPairFunctionFromDenseMatric(data, nrow, ncol, data_type, is_row_major);
2535
  ref_booster->Predict(start_iteration, num_iteration, predict_type, nrow, ncol, get_row_fun,
Guolin Ke's avatar
Guolin Ke committed
2536
                       config, out_result, out_len);
2537
  API_END();
Guolin Ke's avatar
Guolin Ke committed
2538
}
2539

2540
int LGBM_BoosterPredictForMatSingleRow(BoosterHandle handle,
                                       const void* data,
                                       int data_type,
                                       int32_t ncol,
                                       int is_row_major,
                                       int predict_type,
                                       int start_iteration,
                                       int num_iteration,
                                       const char* parameter,
                                       int64_t* out_len,
                                       double* out_result) {
  API_BEGIN();
  // Single-row dense prediction via the predictor cached inside the Booster.
  const auto cfg_map = Config::Str2Map(parameter);
  Config cfg;
  cfg.Set(cfg_map);
  OMP_SET_NUM_THREADS(cfg.num_threads);
  auto* booster = reinterpret_cast<Booster*>(handle);
  auto row_fn = RowPairFunctionFromDenseMatric(data, 1, ncol, data_type, is_row_major);
  // Lazily (re)configure the booster-owned single-row predictor, then run it.
  booster->SetSingleRowPredictorInner(start_iteration, num_iteration, predict_type, cfg);
  booster->PredictSingleRow(predict_type, ncol, row_fn, cfg, out_result, out_len);
  API_END();
}

2563
int LGBM_BoosterPredictForMatSingleRowFastInit(BoosterHandle handle,
                                               const int predict_type,
                                               const int start_iteration,
                                               const int num_iteration,
                                               const int data_type,
                                               const int32_t ncol,
                                               const char* parameter,
                                               FastConfigHandle *out_fastConfig) {
  API_BEGIN();
  // Pre-build a SingleRowPredictor so repeated single-row dense predictions
  // skip per-call setup; the handle must be freed with LGBM_FastConfigFree.
  auto* booster = reinterpret_cast<Booster*>(handle);

  std::unique_ptr<SingleRowPredictor> predictor =
    booster->InitSingleRowPredictor(predict_type, start_iteration, num_iteration, data_type, ncol, parameter);

  OMP_SET_NUM_THREADS(predictor->config.num_threads);

  *out_fastConfig = predictor.release();
  API_END();
}

int LGBM_BoosterPredictForMatSingleRowFast(FastConfigHandle fastConfig_handle,
                                           const void* data,
                                           int64_t* out_len,
                                           double* out_result) {
  API_BEGIN();
  // The handle wraps a predictor that was fully configured by the
  // corresponding *FastInit call.
  auto* predictor = reinterpret_cast<SingleRowPredictor*>(fastConfig_handle);
  // `data` holds exactly one row laid out in row-major order.
  auto row_fn = RowPairFunctionFromDenseMatric(
      data, 1, predictor->num_cols, predictor->data_type, /*is_row_major=*/1);
  predictor->Predict(row_fn, out_result, out_len);
  API_END();
}

2595

2596
2597
2598
2599
2600
2601
int LGBM_BoosterPredictForMats(BoosterHandle handle,
                               const void** data,
                               int data_type,
                               int32_t nrow,
                               int32_t ncol,
                               int predict_type,
                               int start_iteration,
                               int num_iteration,
                               const char* parameter,
                               int64_t* out_len,
                               double* out_result) {
  API_BEGIN();
  // Parse prediction parameters and cap the OpenMP thread count accordingly.
  Config config;
  config.Set(Config::Str2Map(parameter));
  OMP_SET_NUM_THREADS(config.num_threads);
  Booster* booster = reinterpret_cast<Booster*>(handle);
  // Each entry of `data` points to one dense row of `ncol` values.
  auto row_fn = RowPairFunctionFromDenseRows(data, ncol, data_type);
  booster->Predict(start_iteration, num_iteration, predict_type, nrow, ncol,
                   row_fn, config, out_result, out_len);
  API_END();
}

2618
2619
2620
2621
2622
2623
2624
2625
2626
2627
2628
2629
2630
2631
2632
2633
2634
2635
2636
2637
2638
2639
2640
2641
2642
2643
2644
2645
2646
2647
2648
2649
2650
2651
2652
2653
2654
2655
2656
2657
2658
2659
2660
2661
2662
2663
2664
2665
2666
2667
2668
int LGBM_BoosterPredictForArrow(BoosterHandle handle,
                                int64_t n_chunks,
                                const ArrowArray* chunks,
                                const ArrowSchema* schema,
                                int predict_type,
                                int start_iteration,
                                int num_iteration,
                                const char* parameter,
                                int64_t* out_len,
                                double* out_result) {
  API_BEGIN();

  // Parse prediction parameters and cap the OpenMP thread count accordingly.
  Config config;
  config.Set(Config::Str2Map(parameter));
  OMP_SET_NUM_THREADS(config.num_threads);

  // Wrap the chunked Arrow columns and keep one double-valued iterator
  // per column for random row access.
  ArrowTable table(n_chunks, chunks, schema);
  const auto ncol = table.get_num_columns();
  std::vector<ArrowChunkedArray::Iterator<double>> col_iters;
  col_iters.reserve(ncol);
  for (int64_t j = 0; j < ncol; ++j) {
    col_iters.emplace_back(table.get_column(j).begin<double>());
  }

  // Materialize one dense (feature_index, value) row per requested index.
  auto fetch_row = [ncol, &col_iters](int row_idx) {
    std::vector<std::pair<int, double>> row;
    row.reserve(ncol);
    for (int64_t j = 0; j < ncol; ++j) {
      row.emplace_back(static_cast<int>(j), col_iters[j][row_idx]);
    }
    return row;
  };

  // Run the prediction over all rows of the table.
  reinterpret_cast<Booster*>(handle)->Predict(
      start_iteration, num_iteration, predict_type,
      static_cast<int>(table.get_num_rows()), static_cast<int>(ncol),
      fetch_row, config, out_result, out_len);
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
2669
int LGBM_BoosterSaveModel(BoosterHandle handle,
2670
                          int start_iteration,
2671
                          int num_iteration,
2672
                          int feature_importance_type,
2673
                          const char* filename) {
2674
  API_BEGIN();
Guolin Ke's avatar
Guolin Ke committed
2675
  Booster* ref_booster = reinterpret_cast<Booster*>(handle);
2676
2677
  ref_booster->SaveModelToFile(start_iteration, num_iteration,
                               feature_importance_type, filename);
wxchan's avatar
wxchan committed
2678
2679
2680
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
2681
int LGBM_BoosterSaveModelToString(BoosterHandle handle,
2682
                                  int start_iteration,
2683
                                  int num_iteration,
2684
                                  int feature_importance_type,
2685
                                  int64_t buffer_len,
2686
                                  int64_t* out_len,
2687
                                  char* out_str) {
2688
2689
  API_BEGIN();
  Booster* ref_booster = reinterpret_cast<Booster*>(handle);
2690
2691
  std::string model = ref_booster->SaveModelToString(
      start_iteration, num_iteration, feature_importance_type);
2692
  *out_len = static_cast<int64_t>(model.size()) + 1;
2693
  if (*out_len <= buffer_len) {
Guolin Ke's avatar
Guolin Ke committed
2694
    std::memcpy(out_str, model.c_str(), *out_len);
2695
2696
2697
2698
  }
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
2699
int LGBM_BoosterDumpModel(BoosterHandle handle,
2700
                          int start_iteration,
2701
                          int num_iteration,
2702
                          int feature_importance_type,
2703
2704
                          int64_t buffer_len,
                          int64_t* out_len,
2705
                          char* out_str) {
wxchan's avatar
wxchan committed
2706
2707
  API_BEGIN();
  Booster* ref_booster = reinterpret_cast<Booster*>(handle);
2708
2709
  std::string model = ref_booster->DumpModel(start_iteration, num_iteration,
                                             feature_importance_type);
2710
  *out_len = static_cast<int64_t>(model.size()) + 1;
wxchan's avatar
wxchan committed
2711
  if (*out_len <= buffer_len) {
Guolin Ke's avatar
Guolin Ke committed
2712
    std::memcpy(out_str, model.c_str(), *out_len);
wxchan's avatar
wxchan committed
2713
  }
2714
  API_END();
Guolin Ke's avatar
Guolin Ke committed
2715
}
2716

Guolin Ke's avatar
Guolin Ke committed
2717
int LGBM_BoosterGetLeafValue(BoosterHandle handle,
                             int tree_idx,
                             int leaf_idx,
                             double* out_val) {
  API_BEGIN();
  // Fetch the output value stored at one leaf of one tree.
  Booster* booster = reinterpret_cast<Booster*>(handle);
  *out_val = static_cast<double>(booster->GetLeafValue(tree_idx, leaf_idx));
  API_END();
}

Guolin Ke's avatar
Guolin Ke committed
2727
int LGBM_BoosterSetLeafValue(BoosterHandle handle,
                             int tree_idx,
                             int leaf_idx,
                             double val) {
  API_BEGIN();
  // Overwrite the output value stored at one leaf of one tree.
  Booster* booster = reinterpret_cast<Booster*>(handle);
  booster->SetLeafValue(tree_idx, leaf_idx, val);
  API_END();
}

2737
2738
2739
2740
2741
2742
2743
2744
2745
2746
2747
2748
2749
int LGBM_BoosterFeatureImportance(BoosterHandle handle,
                                  int num_iteration,
                                  int importance_type,
                                  double* out_results) {
  API_BEGIN();
  // Compute per-feature importances over the first `num_iteration` trees and
  // copy them into the caller's buffer, which must hold one double per feature.
  Booster* ref_booster = reinterpret_cast<Booster*>(handle);
  std::vector<double> feature_importances =
      ref_booster->FeatureImportance(num_iteration, importance_type);
  // std::copy replaces the hand-rolled element-by-element loop.
  std::copy(feature_importances.begin(), feature_importances.end(), out_results);
  API_END();
}

2750
2751
2752
2753
2754
2755
2756
2757
2758
2759
2760
2761
2762
2763
2764
2765
2766
2767
int LGBM_BoosterGetUpperBoundValue(BoosterHandle handle,
                                   double* out_results) {
  API_BEGIN();
  // Report the largest raw score the model can possibly produce.
  *out_results = reinterpret_cast<Booster*>(handle)->UpperBoundValue();
  API_END();
}

int LGBM_BoosterGetLowerBoundValue(BoosterHandle handle,
                                   double* out_results) {
  API_BEGIN();
  // Report the smallest raw score the model can possibly produce.
  *out_results = reinterpret_cast<Booster*>(handle)->LowerBoundValue();
  API_END();
}

2768
2769
2770
2771
2772
int LGBM_NetworkInit(const char* machines,
                     int local_listen_port,
                     int listen_time_out,
                     int num_machines) {
  API_BEGIN();
Guolin Ke's avatar
Guolin Ke committed
2773
  Config config;
2774
  config.machines = RemoveQuotationSymbol(std::string(machines));
2775
2776
2777
2778
2779
2780
2781
2782
2783
2784
2785
2786
2787
2788
2789
  config.local_listen_port = local_listen_port;
  config.num_machines = num_machines;
  config.time_out = listen_time_out;
  if (num_machines > 1) {
    Network::Init(config);
  }
  API_END();
}

int LGBM_NetworkFree() {
  API_BEGIN();
  // Tear down any distributed-training network state set up by LGBM_NetworkInit.
  Network::Dispose();
  API_END();
}

2790
2791
2792
int LGBM_NetworkInitWithFunctions(int num_machines, int rank,
                                  void* reduce_scatter_ext_fun,
                                  void* allgather_ext_fun) {
ww's avatar
ww committed
2793
  API_BEGIN();
Guolin Ke's avatar
Guolin Ke committed
2794
  if (num_machines > 1) {
2795
    Network::Init(num_machines, rank, (ReduceScatterFunction)reduce_scatter_ext_fun, (AllgatherFunction)allgather_ext_fun);
ww's avatar
ww committed
2796
2797
2798
  }
  API_END();
}
Guolin Ke's avatar
Guolin Ke committed
2799

2800
2801
2802
2803
2804
2805
2806
2807
2808
2809
2810
2811
2812
2813
2814
2815
2816
int LGBM_SetMaxThreads(int num_threads) {
  API_BEGIN();
  // Non-positive values mean "no cap", stored as the sentinel -1.
  LGBM_MAX_NUM_THREADS = (num_threads <= 0) ? -1 : num_threads;
  API_END();
}

int LGBM_GetMaxThreads(int* out) {
  API_BEGIN();
  // Report the current global thread cap (-1 means uncapped).
  *out = LGBM_MAX_NUM_THREADS;
  API_END();
}


Guolin Ke's avatar
Guolin Ke committed
2817
// ---- start of some help functions
2818

2819
2820
2821
2822
2823
2824
2825
2826
2827
2828
2829
2830
2831
2832
2833
2834
2835
2836
2837
2838
2839
2840
2841
2842
2843

template<typename T>
std::function<std::vector<double>(int row_idx)>
RowFunctionFromDenseMatric_helper(const void* data, int num_row, int num_col, int is_row_major) {
  // Returns a closure that extracts one row of a dense matrix as doubles,
  // converting from the element type T of the underlying buffer.
  const T* values = reinterpret_cast<const T*>(data);
  if (is_row_major) {
    // Row-major: row `row_idx` is a contiguous run of `num_col` values.
    return [=](int row_idx) {
      std::vector<double> row(num_col);
      const T* first = values + static_cast<size_t>(num_col) * row_idx;
      for (int j = 0; j < num_col; ++j) {
        row[j] = static_cast<double>(first[j]);
      }
      return row;
    };
  }
  // Column-major: consecutive elements of a row are `num_row` apart.
  return [=](int row_idx) {
    std::vector<double> row(num_col);
    for (int j = 0; j < num_col; ++j) {
      row[j] = static_cast<double>(values[static_cast<size_t>(num_row) * j + row_idx]);
    }
    return row;
  };
}

2844
2845
std::function<std::vector<double>(int row_idx)>
RowFunctionFromDenseMatric(const void* data, int num_row, int num_col, int data_type, int is_row_major) {
Guolin Ke's avatar
Guolin Ke committed
2846
  if (data_type == C_API_DTYPE_FLOAT32) {
2847
    return RowFunctionFromDenseMatric_helper<float>(data, num_row, num_col, is_row_major);
Guolin Ke's avatar
Guolin Ke committed
2848
  } else if (data_type == C_API_DTYPE_FLOAT64) {
2849
    return RowFunctionFromDenseMatric_helper<double>(data, num_row, num_col, is_row_major);
2850
  }
2851
  Log::Fatal("Unknown data type in RowFunctionFromDenseMatric");
2852
  return nullptr;
2853
2854
2855
2856
}

std::function<std::vector<std::pair<int, double>>(int row_idx)>
RowPairFunctionFromDenseMatric(const void* data, int num_row, int num_col, int data_type, int is_row_major) {
Guolin Ke's avatar
Guolin Ke committed
2857
2858
  auto inner_function = RowFunctionFromDenseMatric(data, num_row, num_col, data_type, is_row_major);
  if (inner_function != nullptr) {
2859
    return [inner_function] (int row_idx) {
Guolin Ke's avatar
Guolin Ke committed
2860
2861
      auto raw_values = inner_function(row_idx);
      std::vector<std::pair<int, double>> ret;
Guolin Ke's avatar
Guolin Ke committed
2862
      ret.reserve(raw_values.size());
Guolin Ke's avatar
Guolin Ke committed
2863
      for (int i = 0; i < static_cast<int>(raw_values.size()); ++i) {
Guolin Ke's avatar
Guolin Ke committed
2864
        if (std::fabs(raw_values[i]) > kZeroThreshold || std::isnan(raw_values[i])) {
Guolin Ke's avatar
Guolin Ke committed
2865
          ret.emplace_back(i, raw_values[i]);
2866
        }
Guolin Ke's avatar
Guolin Ke committed
2867
2868
2869
      }
      return ret;
    };
2870
  }
Guolin Ke's avatar
Guolin Ke committed
2871
  return nullptr;
2872
2873
}

2874
2875
2876
2877
2878
2879
2880
// data is array of pointers to individual rows
std::function<std::vector<std::pair<int, double>>(int row_idx)>
RowPairFunctionFromDenseRows(const void** data, int num_col, int data_type) {
  return [=](int row_idx) {
    auto inner_function = RowFunctionFromDenseMatric(data[row_idx], 1, num_col, data_type, /* is_row_major */ true);
    auto raw_values = inner_function(0);
    std::vector<std::pair<int, double>> ret;
Guolin Ke's avatar
Guolin Ke committed
2881
    ret.reserve(raw_values.size());
2882
2883
2884
2885
2886
2887
2888
2889
2890
    for (int i = 0; i < static_cast<int>(raw_values.size()); ++i) {
      if (std::fabs(raw_values[i]) > kZeroThreshold || std::isnan(raw_values[i])) {
        ret.emplace_back(i, raw_values[i]);
      }
    }
    return ret;
  };
}

2891
2892
2893
2894
2895
2896
2897
2898
2899
2900
2901
2902
2903
2904
2905
2906
2907
2908
2909
template<typename T, typename T1, typename T2>
std::function<std::vector<std::pair<int, double>>(T idx)>
RowFunctionFromCSR_helper(const void* indptr, const int32_t* indices, const void* data) {
  // Returns a closure that extracts row `idx` of a CSR matrix as sparse
  // (column, value) pairs. T1 is the value dtype, T2 the indptr dtype.
  const T1* values = reinterpret_cast<const T1*>(data);
  const T2* row_offsets = reinterpret_cast<const T2*>(indptr);
  return [=](T idx) {
    const int64_t begin = row_offsets[idx];
    const int64_t end = row_offsets[idx + 1];
    std::vector<std::pair<int, double>> row;
    if (end > begin) {
      row.reserve(end - begin);
    }
    for (int64_t i = begin; i < end; ++i) {
      row.emplace_back(indices[i], values[i]);
    }
    return row;
  };
}

2910
2911
template<typename T>
std::function<std::vector<std::pair<int, double>>(T idx)>
2912
RowFunctionFromCSR(const void* indptr, int indptr_type, const int32_t* indices, const void* data, int data_type, int64_t , int64_t ) {
Guolin Ke's avatar
Guolin Ke committed
2913
2914
  if (data_type == C_API_DTYPE_FLOAT32) {
    if (indptr_type == C_API_DTYPE_INT32) {
2915
     return RowFunctionFromCSR_helper<T, float, int32_t>(indptr, indices, data);
Guolin Ke's avatar
Guolin Ke committed
2916
    } else if (indptr_type == C_API_DTYPE_INT64) {
2917
     return RowFunctionFromCSR_helper<T, float, int64_t>(indptr, indices, data);
2918
    }
Guolin Ke's avatar
Guolin Ke committed
2919
2920
  } else if (data_type == C_API_DTYPE_FLOAT64) {
    if (indptr_type == C_API_DTYPE_INT32) {
2921
     return RowFunctionFromCSR_helper<T, double, int32_t>(indptr, indices, data);
Guolin Ke's avatar
Guolin Ke committed
2922
    } else if (indptr_type == C_API_DTYPE_INT64) {
2923
     return RowFunctionFromCSR_helper<T, double, int64_t>(indptr, indices, data);
Guolin Ke's avatar
Guolin Ke committed
2924
2925
    }
  }
2926
  Log::Fatal("Unknown data type in RowFunctionFromCSR");
2927
  return nullptr;
2928
2929
}

2930
2931
2932
2933
2934
2935
2936
2937
2938
2939
2940
2941
2942
2943
2944
2945
2946
2947
2948


template <typename T1, typename T2>
std::function<std::pair<int, double>(int idx)> IterateFunctionFromCSC_helper(const void* col_ptr, const int32_t* indices, const void* data, int col_idx) {
  // Returns a closure that yields the `offset`-th nonzero of CSC column
  // `col_idx` as (row_index, value), or (-1, 0.0) once the column is
  // exhausted. T1 is the value dtype, T2 the column-pointer dtype.
  const T1* values = reinterpret_cast<const T1*>(data);
  const T2* col_offsets = reinterpret_cast<const T2*>(col_ptr);
  const int64_t begin = col_offsets[col_idx];
  const int64_t end = col_offsets[col_idx + 1];
  return [=](int offset) {
    const int64_t pos = static_cast<int64_t>(begin + offset);
    if (pos >= end) {
      return std::make_pair(-1, 0.0);
    }
    return std::make_pair(static_cast<int>(indices[pos]),
                          static_cast<double>(values[pos]));
  };
}

Guolin Ke's avatar
Guolin Ke committed
2949
std::function<std::pair<int, double>(int idx)>
2950
IterateFunctionFromCSC(const void* col_ptr, int col_ptr_type, const int32_t* indices, const void* data, int data_type, int64_t ncol_ptr, int64_t , int col_idx) {
Guolin Ke's avatar
Guolin Ke committed
2951
  CHECK(col_idx < ncol_ptr && col_idx >= 0);
Guolin Ke's avatar
Guolin Ke committed
2952
2953
  if (data_type == C_API_DTYPE_FLOAT32) {
    if (col_ptr_type == C_API_DTYPE_INT32) {
2954
      return IterateFunctionFromCSC_helper<float, int32_t>(col_ptr, indices, data, col_idx);
Guolin Ke's avatar
Guolin Ke committed
2955
    } else if (col_ptr_type == C_API_DTYPE_INT64) {
2956
      return IterateFunctionFromCSC_helper<float, int64_t>(col_ptr, indices, data, col_idx);
Guolin Ke's avatar
Guolin Ke committed
2957
    }
Guolin Ke's avatar
Guolin Ke committed
2958
2959
  } else if (data_type == C_API_DTYPE_FLOAT64) {
    if (col_ptr_type == C_API_DTYPE_INT32) {
2960
      return IterateFunctionFromCSC_helper<double, int32_t>(col_ptr, indices, data, col_idx);
Guolin Ke's avatar
Guolin Ke committed
2961
    } else if (col_ptr_type == C_API_DTYPE_INT64) {
2962
      return IterateFunctionFromCSC_helper<double, int64_t>(col_ptr, indices, data, col_idx);
Guolin Ke's avatar
Guolin Ke committed
2963
2964
    }
  }
2965
  Log::Fatal("Unknown data type in CSC matrix");
2966
  return nullptr;
2967
2968
}

Guolin Ke's avatar
Guolin Ke committed
2969
CSC_RowIterator::CSC_RowIterator(const void* col_ptr, int col_ptr_type, const int32_t* indices,
                                 const void* data, int data_type, int64_t ncol_ptr, int64_t nelem, int col_idx) {
  // Bind a per-column nonzero iterator; Get()/NextNonZero() walk it lazily.
  iter_fun_ = IterateFunctionFromCSC(col_ptr, col_ptr_type, indices, data, data_type, ncol_ptr, nelem, col_idx);
}

double CSC_RowIterator::Get(int idx) {
  // Advance through the column's nonzero entries until we reach (or pass)
  // row `idx`, caching the last entry seen.
  while (cur_idx_ < idx && !is_end_) {
    const auto entry = iter_fun_(nonzero_idx_);
    if (entry.first < 0) {
      // Negative row index is the iterator's end-of-column sentinel.
      is_end_ = true;
      break;
    }
    cur_idx_ = entry.first;
    cur_val_ = entry.second;
    ++nonzero_idx_;
  }
  // Rows not stored in the column are implicit zeros.
  return (idx == cur_idx_) ? cur_val_ : 0.0;
}

std::pair<int, double> CSC_RowIterator::NextNonZero() {
  // Once exhausted, keep returning the (-1, 0.0) sentinel.
  if (is_end_) {
    return std::make_pair(-1, 0.0);
  }
  const auto entry = iter_fun_(nonzero_idx_);
  ++nonzero_idx_;
  if (entry.first < 0) {
    // Negative row index marks the end of the column's nonzeros.
    is_end_ = true;
  }
  return entry;
}