#include "ps_roi_pool.h"

#include <torch/autograd.h>
#include <torch/types.h>
#if defined(WITH_CUDA) || defined(WITH_HIP)
#include <ATen/autocast_mode.h>
#endif
namespace vision {
namespace ops {
// Forward entry point for position-sensitive RoI pooling.
//
// input: feature map tensor; rois: region proposals; spatial_scale maps RoI
// coordinates from image scale to feature-map scale; pooled_height/width are
// the output bin grid. Returns (output, channel_mapping); the second tensor
// is bookkeeping consumed by _ps_roi_pool_backward.
std::tuple<at::Tensor, at::Tensor> ps_roi_pool(
    const at::Tensor& input,
    const at::Tensor& rois,
    double spatial_scale,
    int64_t pooled_height,
    int64_t pooled_width) {
  // Resolve the registered kernel once and cache it; the dispatcher then
  // routes to the CPU/CUDA implementation based on the tensors' device.
  static auto op = c10::Dispatcher::singleton()
                       .findSchemaOrThrow("torchvision::ps_roi_pool", "")
                       .typed<decltype(ps_roi_pool)>();
  return op.call(input, rois, spatial_scale, pooled_height, pooled_width);
}

#if defined(WITH_CUDA) || defined(WITH_HIP)
// Autocast (AMP) wrapper: promotes inputs to float32, runs the op with
// autocast excluded from dispatch, and casts the results back to the
// original input dtype.
std::tuple<at::Tensor, at::Tensor> ps_roi_pool_autocast(
    const at::Tensor& input,
    const at::Tensor& rois,
    double spatial_scale,
    int64_t pooled_height,
    int64_t pooled_width) {
  // Stop the nested ps_roi_pool call from re-entering this wrapper.
  c10::impl::ExcludeDispatchKeyGuard no_autocast(c10::DispatchKey::Autocast);
  auto result = ps_roi_pool(
      at::autocast::cached_cast(at::kFloat, input),
      at::autocast::cached_cast(at::kFloat, rois),
      spatial_scale,
      pooled_height,
      pooled_width);

  // NOTE(review): both outputs are cast to input.scalar_type(), including
  // the channel_mapping bookkeeping tensor — presumably harmless, but worth
  // confirming the backward op accepts that dtype.
  return std::make_tuple(
      std::get<0>(result).to(input.scalar_type()),
      std::get<1>(result).to(input.scalar_type()));
}

// Route ps_roi_pool through the float32 wrapper when autocast is active.
TORCH_LIBRARY_IMPL(torchvision, Autocast, m) {
  m.impl("ps_roi_pool", ps_roi_pool_autocast);
}
#endif
// Backward entry point: computes the gradient w.r.t. the input feature map.
//
// grad: upstream gradient of the pooled output; rois/channel_mapping: tensors
// saved from the forward pass; batch_size/channels/height/width: the input
// shape, needed to allocate the gradient tensor.
at::Tensor _ps_roi_pool_backward(
    const at::Tensor& grad,
    const at::Tensor& rois,
    const at::Tensor& channel_mapping,
    double spatial_scale,
    int64_t pooled_height,
    int64_t pooled_width,
    int64_t batch_size,
    int64_t channels,
    int64_t height,
    int64_t width) {
  // Resolve and cache the registered backward kernel.
  static auto op =
      c10::Dispatcher::singleton()
          .findSchemaOrThrow("torchvision::_ps_roi_pool_backward", "")
          .typed<decltype(_ps_roi_pool_backward)>();
  return op.call(
      grad,
      rois,
      channel_mapping,
      spatial_scale,
      pooled_height,
      pooled_width,
      batch_size,
      channels,
      height,
      width);
}

// Declare the operator schemas; kernels are registered separately per
// dispatch key (Autocast above, Autograd below, CPU/CUDA elsewhere).
TORCH_LIBRARY_FRAGMENT(torchvision, m) {
  m.def(
      "ps_roi_pool(Tensor input, Tensor rois, float spatial_scale, int pooled_height, int pooled_width) -> (Tensor, Tensor)");
  m.def(
      "_ps_roi_pool_backward(Tensor grad, Tensor rois, Tensor channel_mapping, float spatial_scale, int pooled_height, int pooled_width, int batch_size, int channels, int height, int width) -> Tensor");
}

namespace {

// Autograd node tying ps_roi_pool's forward to _ps_roi_pool_backward.
class PSROIPoolFunction : public torch::autograd::Function<PSROIPoolFunction> {
 public:
  static torch::autograd::variable_list forward(
      torch::autograd::AutogradContext* ctx,
      const torch::autograd::Variable& input,
      const torch::autograd::Variable& rois,
      double spatial_scale,
      int64_t pooled_height,
      int64_t pooled_width) {
    // Stash the non-tensor arguments (and input shape) for backward.
    ctx->saved_data["spatial_scale"] = spatial_scale;
    ctx->saved_data["pooled_height"] = pooled_height;
    ctx->saved_data["pooled_width"] = pooled_width;
    ctx->saved_data["input_shape"] = input.sizes();
    // Run the underlying op without re-dispatching through autograd.
    // NOTE(review): AutoNonVariableTypeMode is deprecated in newer PyTorch
    // (AutoDispatchBelowADInplaceOrView is the replacement) — confirm against
    // the PyTorch version this builds with.
    at::AutoNonVariableTypeMode g;
    auto result =
        ps_roi_pool(input, rois, spatial_scale, pooled_height, pooled_width);

    auto output = std::get<0>(result);
    auto channel_mapping = std::get<1>(result);
    ctx->save_for_backward({rois, channel_mapping});
    // channel_mapping is integer bookkeeping; no gradient flows through it.
    ctx->mark_non_differentiable({channel_mapping});

    return {output, channel_mapping};
  }

  static torch::autograd::variable_list backward(
      torch::autograd::AutogradContext* ctx,
      const torch::autograd::variable_list& grad_output) {
    // Use data saved in forward
    auto saved = ctx->get_saved_variables();
    auto rois = saved[0];
    auto channel_mapping = saved[1];
    auto input_shape = ctx->saved_data["input_shape"].toIntList();
    auto grad_in = _ps_roi_pool_backward(
        grad_output[0],
        rois,
        channel_mapping,
        ctx->saved_data["spatial_scale"].toDouble(),
        ctx->saved_data["pooled_height"].toInt(),
        ctx->saved_data["pooled_width"].toInt(),
        input_shape[0],
        input_shape[1],
        input_shape[2],
        input_shape[3]);

    // One gradient slot per forward argument (after ctx); only input is
    // differentiable.
    return {grad_in,
            torch::autograd::Variable(),
            torch::autograd::Variable(),
            torch::autograd::Variable(),
            torch::autograd::Variable()};
  }
};

// TODO: There should be an easier way to do this
// Autograd node for the backward op itself, so that running backward under
// grad mode works; double backward is explicitly unsupported.
class PSROIPoolBackwardFunction
    : public torch::autograd::Function<PSROIPoolBackwardFunction> {
 public:
  static torch::autograd::variable_list forward(
      torch::autograd::AutogradContext* ctx,
      const torch::autograd::Variable& grad,
      const torch::autograd::Variable& rois,
      const torch::autograd::Variable& channel_mapping,
      double spatial_scale,
      int64_t pooled_height,
      int64_t pooled_width,
      int64_t batch_size,
      int64_t channels,
      int64_t height,
      int64_t width) {
    // Call the dispatcher op below the autograd layer.
    at::AutoNonVariableTypeMode g;
    auto grad_in = _ps_roi_pool_backward(
        grad,
        rois,
        channel_mapping,
        spatial_scale,
        pooled_height,
        pooled_width,
        batch_size,
        channels,
        height,
        width);

    return {grad_in};
  }

  static torch::autograd::variable_list backward(
      torch::autograd::AutogradContext* ctx,
      const torch::autograd::variable_list& grad_output) {
    TORCH_CHECK(0, "double backwards on ps_roi_pool not supported");
  }
};

} // namespace

// Autograd-key kernel: wraps the forward in PSROIPoolFunction so gradients
// flow to the input.
std::tuple<at::Tensor, at::Tensor> ps_roi_pool_autograd(
    const at::Tensor& input,
    const at::Tensor& rois,
    double spatial_scale,
    int64_t pooled_height,
    int64_t pooled_width) {
  auto result = PSROIPoolFunction::apply(
      input, rois, spatial_scale, pooled_height, pooled_width);

  // apply() returns a variable_list: {output, channel_mapping}.
  return std::make_tuple(result[0], result[1]);
}

// Autograd-key kernel for the backward op; delegates to
// PSROIPoolBackwardFunction (which rejects double backward).
at::Tensor ps_roi_pool_backward_autograd(
    const at::Tensor& grad,
    const at::Tensor& rois,
    const at::Tensor& channel_mapping,
    double spatial_scale,
    int64_t pooled_height,
    int64_t pooled_width,
    int64_t batch_size,
    int64_t channels,
    int64_t height,
    int64_t width) {
  return PSROIPoolBackwardFunction::apply(
      grad,
      rois,
      channel_mapping,
      spatial_scale,
      pooled_height,
      pooled_width,
      batch_size,
      channels,
      height,
      width)[0];
}
// Register the autograd-aware kernels for the Autograd dispatch key.
TORCH_LIBRARY_IMPL(torchvision, Autograd, m) {
  m.impl("ps_roi_pool", ps_roi_pool_autograd);
  m.impl("_ps_roi_pool_backward", ps_roi_pool_backward_autograd);
}

} // namespace ops
} // namespace vision