#include <torch/extension.h>
#include <torch/torch.h>

#include <limits>
#include <stdexcept>
#include <vector>

// Checked narrowing conversion: throws instead of silently truncating.
int SizeTToInt(size_t data)
{
    if (data > static_cast<size_t>(std::numeric_limits<int>::max())) {
        throw std::runtime_error("SizeTToInt: value does not fit in int.");
    }
    return static_cast<int>(data);
}

// Sizing helpers implemented alongside the CUDA kernels: the first reports how
// many elements of reserved space the forward pass needs, the second how many
// bytes of scratch the backward pass needs, for the given layer widths.
size_t get_mlp_reserved_space(int batch_size, int num_layers, const int* output_features);

template <typename T>
size_t get_mlp_bp_workspace_in_bytes(int batch_size, int num_layers, const int* output_features);

// Forward pass, implemented in the companion CUDA source. Writes the final
// layer's activations to Y; reserved_space receives whatever the backward
// pass needs to see again.
template <typename T>
int mlp_fp(
    T* X,
    int input_features,
    int batch_size,
    T** WPtr,
    int num_layers,
    int* output_features,
    T** BPtr,
    T* Y,
    T* reserved_space,
    int use_bias,
    int activation);

// Backward pass, implemented in the companion CUDA source. Consumes the
// forward results (Y and reserved_space) and emits dX plus per-layer weight
// and bias gradients.
template <typename T>
int mlp_bp(
    T* X,
    T* Y,
    int input_features,
    int batch_size,
    T** WPtr,
    int num_layers,
    int* output_features,
    T* dY,
    T* reserved_space,
    T* work_space,
    T* dX,
    T** dwPtr,
    T** dbPtr,
    bool requires_grad,
    int use_bias,
    int activation);
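
// Note on buffers, inferred from the call sites below (an assumption, not a
// contract spelled out in this file): reserved_space is allocated with
// get_mlp_reserved_space() elements of the input dtype and is passed to both
// passes, so it presumably caches per-layer activations for reuse in backward;
// work_space is byte-sized scratch used only by the backward pass.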

std::vector<at::Tensor> mlp_forward(int use_bias, int activation, std::vector<at::Tensor> inputs) {

  // `inputs` is ordered (input, W_1..W_n[, b_1..b_n]); recover the layer count.
  int num_layers = SizeTToInt(inputs.size() - 1);
  if (use_bias) {
    // inputs contains (input, weights, biases)
    num_layers /= 2;
  }
  auto batch_size = inputs[0].size(0);
  auto input_features = inputs[0].size(1);

  // Weights follow the PyTorch linear convention {out_features, in_features},
  // so size(0) is each layer's output width.
  std::vector<int> output_features;
  for (int i = 0; i < num_layers; i++) {
    output_features.push_back(SizeTToInt(inputs[i + 1].size(0)));
  }

  auto reserved_size = get_mlp_reserved_space(batch_size, num_layers, output_features.data());

  // create output/workspace tensor
  // TODO(deyuf): just get buffer?
  auto out = at::empty({batch_size, output_features.back()}, inputs[0].options());
  auto reserved_space = at::empty({SizeTToInt(reserved_size)}, inputs[0].options());

  AT_DISPATCH_FLOATING_TYPES_AND_HALF(inputs[0].scalar_type(), "mlp_forward", [&] {
    std::vector<scalar_t*> w_ptr;
    std::vector<scalar_t*> b_ptr;
    for (int i = 0; i < num_layers; i++) {
      w_ptr.push_back(inputs[i + 1].data_ptr<scalar_t>());
      if (use_bias) {
        b_ptr.push_back(inputs[i + 1 + num_layers].data_ptr<scalar_t>());
      }
    }
    // Return code from the kernel path is currently ignored.
    auto result = mlp_fp<scalar_t>(
        inputs[0].data_ptr<scalar_t>(),
        input_features,
        batch_size,
        w_ptr.data(),
        num_layers,
        output_features.data(),
        b_ptr.data(),
        out.data_ptr<scalar_t>(),
        reserved_space.data_ptr<scalar_t>(),
        use_bias,
        activation);
  });

  return {out, reserved_space};
}
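
// ---------------------------------------------------------------------------
// Usage sketch (illustrative only, assuming the CUDA kernels are built and a
// GPU is present). Shapes follow the conventions above: weights are
// {out_features, in_features}, biases {out_features}, and the input list is
// ordered (input, W_1..W_n, b_1..b_n). The activation code 1 is an assumption
// about the companion kernel's encoding (e.g. ReLU), not documented here.
//
//   auto opts = torch::dtype(torch::kFloat32).device(torch::kCUDA);
//   auto x  = torch::randn({32, 64}, opts);   // batch 32, 64 input features
//   auto w1 = torch::randn({128, 64}, opts);  // layer 1: 64 -> 128
//   auto b1 = torch::randn({128}, opts);
//   auto w2 = torch::randn({10, 128}, opts);  // layer 2: 128 -> 10
//   auto b2 = torch::randn({10}, opts);
//   auto outs = mlp_forward(/*use_bias=*/1, /*activation=*/1,
//                           {x, w1, w2, b1, b2});
//   auto y = outs[0];       // {32, 10} output activations
//   auto saved = outs[1];   // reserved space, threaded through to backward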

std::vector<at::Tensor> mlp_backward(
  int use_bias,
  int activation,
  at::Tensor grad_o,
  std::vector<at::Tensor> fprop_outputs,
  std::vector<at::Tensor> inputs) {

  int num_layers = SizeTToInt(inputs.size() - 1);
  if (use_bias) {
    // inputs contains (input, weights, biases)
    num_layers /= 2;
  }

  auto batch_size = inputs[0].size(0);
  auto input_features = inputs[0].size(1);

  // TODO: skip creating the empty dX tensor when grad is not required?
  bool requires_grad = inputs[0].requires_grad();

  std::vector<int> output_features;
  for (int i = 0; i < num_layers; i++) {
    output_features.push_back(SizeTToInt(inputs[i + 1].size(0)));
  }
  // Create gradient outputs, one per entry of inputs.
  // TODO: skip creating bias gradient tensors when use_bias is false
  std::vector<at::Tensor> outputs;
  for (size_t i = 0; i < inputs.size(); i++) {
    outputs.push_back(at::empty(inputs[i].sizes(), inputs[i].options()));  // clone for testing now
  }

  AT_DISPATCH_FLOATING_TYPES_AND_HALF(inputs[0].scalar_type(), "mlp_backward", [&] {
    std::vector<scalar_t*> w_ptr;
    for (int i = 0; i < num_layers; i++) {
      w_ptr.push_back(inputs[i + 1].data_ptr<scalar_t>());
    }
    std::vector<scalar_t*> outputs_ptr;
    for (size_t i = 0; i < inputs.size(); i++) {
      outputs_ptr.push_back(outputs[i].data_ptr<scalar_t>());
    }

    auto work_size =
        get_mlp_bp_workspace_in_bytes<scalar_t>(batch_size, num_layers, output_features.data());

    // auto work_space = at::empty({work_size*4}, at::kByte);
    // Allocate the scratch buffer in elements of the dispatched dtype,
    // rounding up in case work_size is not a multiple of the element size.
    auto work_space = at::empty(
        {SizeTToInt((work_size + sizeof(scalar_t) - 1) / sizeof(scalar_t))}, inputs[0].options());

    // Return code from the kernel path is currently ignored.
    auto result = mlp_bp<scalar_t>(
        inputs[0].data_ptr<scalar_t>(),
        fprop_outputs[0].data_ptr<scalar_t>(),
        input_features,
        batch_size,
        w_ptr.data(),
        num_layers,
        output_features.data(),
        grad_o.contiguous().data_ptr<scalar_t>(),
        fprop_outputs[1].data_ptr<scalar_t>(),
        work_space.data_ptr<scalar_t>(),
        outputs_ptr[0],
        outputs_ptr.data() + 1,
        outputs_ptr.data() + 1 + num_layers,
        requires_grad,
        use_bias,
        activation);
  });

  return outputs;
}
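
// ---------------------------------------------------------------------------
// Usage sketch (illustrative only), continuing the forward sketch above: the
// backward entry point takes the incoming gradient plus both tensors returned
// by mlp_forward, and returns gradients ordered like `inputs`
// (dX, dW_1..dW_n, db_1..db_n).
//
//   auto grad_y = torch::ones_like(y);
//   auto grads = mlp_backward(/*use_bias=*/1, /*activation=*/1,
//                             grad_y, outs, {x, w1, w2, b1, b2});
//   auto dx  = grads[0];   // same shape as x
//   auto dw1 = grads[1];   // same shape as w1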

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("forward", &mlp_forward, "MLP forward");
  m.def("backward", &mlp_backward, "MLP backward");
}
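
// A small optional refinement (a sketch, not part of the original module):
// pybind11 can attach named arguments so the Python side can call these
// functions with keywords.
//
//   m.def("forward", &mlp_forward, "MLP forward",
//         py::arg("use_bias"), py::arg("activation"), py::arg("inputs"));
//   m.def("backward", &mlp_backward, "MLP backward",
//         py::arg("use_bias"), py::arg("activation"), py::arg("grad_o"),
//         py::arg("fprop_outputs"), py::arg("inputs"));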