#pragma once

#include "cpu/vision_cpu.h"

#ifdef WITH_CUDA
#include "autocast.h"
#include "cuda/vision_cuda.h"
#endif
#ifdef WITH_HIP
#include "autocast.h"
#include "hip/vision_cuda.h"
#endif

// TODO: put this stuff in torchvision namespace

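// Dispatcher stub: looks up the "torchvision::roi_pool" schema once and
// forwards the call, so the actual kernel (CPU/CUDA, autograd, autocast)
// is selected by the dispatcher rather than by this header.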
std::tuple<at::Tensor, at::Tensor> roi_pool(
    const at::Tensor& input,
    const at::Tensor& rois,
    double spatial_scale,
    int64_t pooled_height,
    int64_t pooled_width) {
  static auto op = c10::Dispatcher::singleton()
                       .findSchemaOrThrow("torchvision::roi_pool", "")
                       .typed<decltype(roi_pool)>();
  return op.call(input, rois, spatial_scale, pooled_height, pooled_width);
}

#if defined(WITH_CUDA) || defined(WITH_HIP)
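// Autocast wrapper: casts inputs to float32, re-dispatches roi_pool with
// autocast excluded (so this wrapper is not re-entered), then casts the
// outputs back to the input's original dtype.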
std::tuple<at::Tensor, at::Tensor> ROIPool_autocast(
    const at::Tensor& input,
    const at::Tensor& rois,
    double spatial_scale,
    int64_t pooled_height,
    int64_t pooled_width) {
  c10::impl::ExcludeDispatchKeyGuard no_autocast(c10::DispatchKey::Autocast);
  auto result = roi_pool(
      at::autocast::cached_cast(at::kFloat, input),
      at::autocast::cached_cast(at::kFloat, rois),
      spatial_scale,
      pooled_height,
      pooled_width);

  return std::make_tuple(
      std::get<0>(result).to(input.scalar_type()),
      std::get<1>(result).to(input.scalar_type()));
}
#endif

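// Dispatcher stub for the backward op; batch_size/channels/height/width
// give the shape of the input gradient to reconstruct.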
at::Tensor _roi_pool_backward(
    const at::Tensor& grad,
    const at::Tensor& rois,
    const at::Tensor& argmax,
    double spatial_scale,
    int64_t pooled_height,
    int64_t pooled_width,
    int64_t batch_size,
    int64_t channels,
    int64_t height,
    int64_t width) {
  static auto op = c10::Dispatcher::singleton()
                       .findSchemaOrThrow("torchvision::_roi_pool_backward", "")
                       .typed<decltype(_roi_pool_backward)>();
  return op.call(
      grad,
      rois,
      argmax,
      spatial_scale,
      pooled_height,
      pooled_width,
      batch_size,
      channels,
      height,
      width);
}

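// Autograd support: forward saves rois and the argmax indices so backward
// can route gradients to the input locations that won each pooling bin.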
class ROIPoolFunction : public torch::autograd::Function<ROIPoolFunction> {
 public:
  static torch::autograd::variable_list forward(
      torch::autograd::AutogradContext* ctx,
      const torch::autograd::Variable& input,
      const torch::autograd::Variable& rois,
      double spatial_scale,
      int64_t pooled_height,
      int64_t pooled_width) {
    ctx->saved_data["spatial_scale"] = spatial_scale;
    ctx->saved_data["pooled_height"] = pooled_height;
    ctx->saved_data["pooled_width"] = pooled_width;
    ctx->saved_data["input_shape"] = input.sizes();
    at::AutoNonVariableTypeMode g;
    auto result =
        roi_pool(input, rois, spatial_scale, pooled_height, pooled_width);

    auto output = std::get<0>(result);
    auto argmax = std::get<1>(result);
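    // argmax is needed by backward but is not itself differentiable.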
    ctx->save_for_backward({rois, argmax});
    ctx->mark_non_differentiable({argmax});

    return {output, argmax};
  }

  static torch::autograd::variable_list backward(
      torch::autograd::AutogradContext* ctx,
      const torch::autograd::variable_list& grad_output) {
    // Use data saved in forward
    auto saved = ctx->get_saved_variables();
    auto rois = saved[0];
    auto argmax = saved[1];
    auto input_shape = ctx->saved_data["input_shape"].toIntList();
    auto grad_in = _roi_pool_backward(
        grad_output[0],
        rois,
        argmax,
        ctx->saved_data["spatial_scale"].toDouble(),
        ctx->saved_data["pooled_height"].toInt(),
        ctx->saved_data["pooled_width"].toInt(),
        input_shape[0],
        input_shape[1],
        input_shape[2],
        input_shape[3]);

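    // One entry per forward input; non-tensor arguments get undefined grads.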
    return {grad_in,
            torch::autograd::Variable(),
            torch::autograd::Variable(),
            torch::autograd::Variable(),
            torch::autograd::Variable()};
  }
};

// TODO: There should be an easier way to do this
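// Wrapping the backward op in its own autograd Function makes it callable
// under the Autograd dispatch key; double backward is explicitly unsupported.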
class ROIPoolBackwardFunction
    : public torch::autograd::Function<ROIPoolBackwardFunction> {
 public:
  static torch::autograd::variable_list forward(
      torch::autograd::AutogradContext* ctx,
      const torch::autograd::Variable& grad,
      const torch::autograd::Variable& rois,
      const torch::autograd::Variable& argmax,
      double spatial_scale,
      int64_t pooled_height,
      int64_t pooled_width,
      int64_t batch_size,
      int64_t channels,
      int64_t height,
      int64_t width) {
    at::AutoNonVariableTypeMode g;
    auto grad_in = _roi_pool_backward(
        grad,
        rois,
        argmax,
        spatial_scale,
        pooled_height,
        pooled_width,
        batch_size,
        channels,
        height,
        width);

    return {grad_in};
  }

  static torch::autograd::variable_list backward(
      torch::autograd::AutogradContext* ctx,
      const torch::autograd::variable_list& grad_output) {
    TORCH_CHECK(0, "double backwards on roi_pool not supported");
  }
};

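// Autograd entry point for roi_pool, intended for registration under the
// Autograd dispatch key.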
std::tuple<at::Tensor, at::Tensor> ROIPool_autograd(
    const at::Tensor& input,
    const at::Tensor& rois,
    double spatial_scale,
    int64_t pooled_height,
    int64_t pooled_width) {
  auto result = ROIPoolFunction::apply(
      input, rois, spatial_scale, pooled_height, pooled_width);

  return std::make_tuple(result[0], result[1]);
}

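// Autograd entry point for the backward op (see ROIPoolBackwardFunction).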
at::Tensor ROIPool_backward_autograd(
    const at::Tensor& grad,
    const at::Tensor& rois,
    const at::Tensor& argmax,
    double spatial_scale,
    int64_t pooled_height,
    int64_t pooled_width,
    int64_t batch_size,
    int64_t channels,
    int64_t height,
    int64_t width) {
  return ROIPoolBackwardFunction::apply(
      grad,
      rois,
      argmax,
      spatial_scale,
      pooled_height,
      pooled_width,
      batch_size,
      channels,
      height,
      width)[0];
}
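
// A minimal sketch of how these wrappers are typically wired up. The actual
// registration lives elsewhere in the repository (not in this header), so
// the TORCH_LIBRARY_IMPL blocks below are illustrative assumptions only:
//
//   TORCH_LIBRARY_IMPL(torchvision, Autograd, m) {
//     m.impl("roi_pool", ROIPool_autograd);
//     m.impl("_roi_pool_backward", ROIPool_backward_autograd);
//   }
//
//   #if defined(WITH_CUDA) || defined(WITH_HIP)
//   TORCH_LIBRARY_IMPL(torchvision, Autocast, m) {
//     m.impl("roi_pool", ROIPool_autocast);
//   }
//   #endif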