/*!
 * Copyright (c) 2021 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for
 * license information.
 */

#ifndef LIGHTGBM_NEW_CUDA_REGRESSION_OBJECTIVE_HPP_
#define LIGHTGBM_NEW_CUDA_REGRESSION_OBJECTIVE_HPP_

#ifdef USE_CUDA_EXP

#define GET_GRADIENTS_BLOCK_SIZE_REGRESSION (1024)

#include <LightGBM/cuda/cuda_objective_function.hpp>

#include <string>
#include <vector>

#include "../regression_objective.hpp"

namespace LightGBM {

/*!
 * \brief CUDA implementation of the L2 (squared-error) regression objective.
 *        Inherits the CPU objective for configuration/serialization behavior and
 *        the CUDA interface for device-side gradient computation.
 */
class CUDARegressionL2loss : public CUDAObjectiveInterface, public RegressionL2loss {
 public:
  /*! \brief Construct from the training configuration. */
  explicit CUDARegressionL2loss(const Config& config);

  /*! \brief Construct when loading the objective from a saved model string. */
  explicit CUDARegressionL2loss(const std::vector<std::string>& strs);

  ~CUDARegressionL2loss();

  /*! \brief Set up device-side state (label/weight pointers, buffers) from metadata. */
  void Init(const Metadata& metadata, data_size_t num_data) override;

  /*! \brief Compute per-datum gradients and hessians on the GPU. */
  void GetGradients(const double* score, score_t* gradients, score_t* hessians) const override;

  /*! \brief Convert raw scores to model outputs on the GPU. */
  void ConvertOutputCUDA(const data_size_t num_data, const double* input, double* output) const override;

  double BoostFromScore(int) const override;

  /*! \brief Expose the output-conversion kernel launch as a callable for generic callers. */
  std::function<void(data_size_t, const double*, double*)> GetCUDAConvertOutputFunc() const override {
    return [this] (data_size_t num_data, const double* input, double* output) {
      ConvertOutputCUDA(num_data, input, output);
    };
  }

  // The hessian is constant only when there are no per-datum weights on device.
  bool IsConstantHessian() const override { return cuda_weights_ == nullptr; }

  bool IsCUDAObjective() const override { return true; }

 protected:
  virtual double LaunchCalcInitScoreKernel() const;

  virtual void LaunchGetGradientsKernel(const double* score, score_t* gradients, score_t* hessians) const;

  virtual void LaunchConvertOutputCUDAKernel(const data_size_t num_data, const double* input, double* output) const;

  // Device pointers to labels/weights; presumably owned by Metadata, not by this
  // object — TODO(review): confirm ownership in the .cu implementation.
  const label_t* cuda_labels_;
  const label_t* cuda_weights_;
  // Device buffer for (possibly transformed) labels — see kernel implementation.
  CUDAVector<label_t> cuda_trans_label_;
  // Scratch buffer, presumably for per-block kernel reductions — confirm in .cu file.
  CUDAVector<double> cuda_block_buffer_;
  // Kernel launch block counts for the gradient and init-score kernels (per their names).
  data_size_t num_get_gradients_blocks_;
  data_size_t num_init_score_blocks_;
};


/*!
 * \brief CUDA implementation of the L1 (absolute-error) regression objective.
 *        Overrides the gradient and init-score kernels of the L2 base class and
 *        renews leaf outputs after tree growth (median-style output renewal —
 *        TODO(review): confirm against the kernel implementation).
 */
class CUDARegressionL1loss : public CUDARegressionL2loss {
 public:
  /*! \brief Construct from the training configuration. */
  explicit CUDARegressionL1loss(const Config& config);

  /*! \brief Construct when loading the objective from a saved model string. */
  explicit CUDARegressionL1loss(const std::vector<std::string>& strs);

  ~CUDARegressionL1loss();

  /*! \brief Set up device-side state, including the extra per-leaf buffers below. */
  void Init(const Metadata& metadata, data_size_t num_data) override;

  /*! \brief Recompute leaf output values on the GPU after a tree is grown. */
  void RenewTreeOutputCUDA(const double* score, const data_size_t* data_indices_in_leaf, const data_size_t* num_data_in_leaf,
    const data_size_t* data_start_in_leaf, const int num_leaves, double* leaf_value) const override;

  // L1 leaf outputs cannot be derived from gradients alone, so renewal is required.
  bool IsRenewTreeOutput() const override { return true; }

 protected:
  // Device scratch buffers used by the renew-tree-output kernels; exact roles are
  // defined in the .cu implementation (names suggest prefix sums over weights,
  // per-leaf residuals, and percentile/median results — confirm there).
  CUDAVector<data_size_t> cuda_data_indices_buffer_;
  CUDAVector<double> cuda_weights_prefix_sum_;
  CUDAVector<double> cuda_weights_prefix_sum_buffer_;
  CUDAVector<double> cuda_residual_buffer_;
  CUDAVector<label_t> cuda_weight_by_leaf_buffer_;
  CUDAVector<label_t> cuda_percentile_result_;

  double LaunchCalcInitScoreKernel() const override;

  void LaunchGetGradientsKernel(const double* score, score_t* gradients, score_t* hessians) const override;

  void LaunchRenewTreeOutputCUDAKernel(
    const double* score, const data_size_t* data_indices_in_leaf, const data_size_t* num_data_in_leaf,
    const data_size_t* data_start_in_leaf, const int num_leaves, double* leaf_value) const;
};


/*!
 * \brief CUDA implementation of the Huber regression objective.
 *        Reuses the L2 base class and overrides only the gradient kernel.
 */
class CUDARegressionHuberLoss : public CUDARegressionL2loss {
 public:
  /*! \brief Construct from the training configuration. */
  explicit CUDARegressionHuberLoss(const Config& config);

  /*! \brief Construct when loading the objective from a saved model string. */
  explicit CUDARegressionHuberLoss(const std::vector<std::string>& strs);

  ~CUDARegressionHuberLoss();

  bool IsRenewTreeOutput() const override { return true; }

 private:
  void LaunchGetGradientsKernel(const double* score, score_t* gradients, score_t* hessians) const override;

  // Huber threshold parameter; presumably set from Config::alpha in the constructor's
  // initializer list — confirm in the .cu implementation. Use a double literal for a
  // double member (was `0.0f`, a float literal; same value, inconsistent type).
  const double alpha_ = 0.0;
};


}  // namespace LightGBM

#endif  // USE_CUDA_EXP
#endif  // LIGHTGBM_NEW_CUDA_REGRESSION_OBJECTIVE_HPP_