#include #include "roi_pool_kernel.h" namespace vision { namespace ops { namespace { template inline void add(T* address, const T& val) { *address += val; } template void roi_pool_forward_kernel_impl( const T* input, const T spatial_scale, int channels, int height, int width, int pooled_height, int pooled_width, const T* rois, int num_rois, T* output, int* argmax_data) { for (int n = 0; n < num_rois; ++n) { const T* offset_rois = rois + n * 5; int roi_batch_ind = offset_rois[0]; int roi_start_w = round(offset_rois[1] * spatial_scale); int roi_start_h = round(offset_rois[2] * spatial_scale); int roi_end_w = round(offset_rois[3] * spatial_scale); int roi_end_h = round(offset_rois[4] * spatial_scale); // Force malformed ROIs to be 1x1 int roi_width = std::max(roi_end_w - roi_start_w + 1, 1); int roi_height = std::max(roi_end_h - roi_start_h + 1, 1); T bin_size_h = static_cast(roi_height) / static_cast(pooled_height); T bin_size_w = static_cast(roi_width) / static_cast(pooled_width); for (int ph = 0; ph < pooled_height; ++ph) { for (int pw = 0; pw < pooled_width; ++pw) { int hstart = static_cast(floor(static_cast(ph) * bin_size_h)); int wstart = static_cast(floor(static_cast(pw) * bin_size_w)); int hend = static_cast(ceil(static_cast(ph + 1) * bin_size_h)); int wend = static_cast(ceil(static_cast(pw + 1) * bin_size_w)); // Add roi offsets and clip to input boundaries hstart = std::min(std::max(hstart + roi_start_h, 0), height); hend = std::min(std::max(hend + roi_start_h, 0), height); wstart = std::min(std::max(wstart + roi_start_w, 0), width); wend = std::min(std::max(wend + roi_start_w, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); for (int c = 0; c < channels; ++c) { // Define an empty pooling region to be zero T maxval = is_empty ? 0 : -FLT_MAX; // If nothing is pooled, argmax = -1 causes nothing to be backprop'd int maxidx = -1; const T* input_offset = input + (roi_batch_ind * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int input_index = h * width + w; if (input_offset[input_index] > maxval) { maxval = input_offset[input_index]; maxidx = input_index; } } } int index = ((n * channels + c) * pooled_height + ph) * pooled_width + pw; output[index] = maxval; argmax_data[index] = maxidx; } // channels } // pooled_width } // pooled_height } // num_rois } template void roi_pool_backward_kernel_impl( const T* grad_output, const int* argmax_data, int num_rois, int channels, int height, int width, int pooled_height, int pooled_width, T* grad_input, const T* rois, int n_stride, int c_stride, int h_stride, int w_stride) { for (int n = 0; n < num_rois; ++n) { const T* offset_rois = rois + n * 5; int roi_batch_ind = offset_rois[0]; for (int c = 0; c < channels; ++c) { T* grad_input_offset = grad_input + ((roi_batch_ind * channels + c) * height * width); const int* argmax_data_offset = argmax_data + (n * channels + c) * pooled_height * pooled_width; for (int ph = 0; ph < pooled_height; ++ph) { for (int pw = 0; pw < pooled_width; ++pw) { int output_offset = n * n_stride + c * c_stride; int argmax = argmax_data_offset[ph * pooled_width + pw]; if (argmax != -1) { add(grad_input_offset + argmax, static_cast( grad_output [output_offset + ph * h_stride + pw * w_stride])); } } // pooled_width } // pooled_height } // channels } // num_rois } } // namespace std::tuple roi_pool_forward_cpu( const at::Tensor& input, const at::Tensor& rois, double spatial_scale, int64_t pooled_height, int64_t pooled_width) { TORCH_CHECK(input.device().is_cpu(), "input must be a CPU tensor"); TORCH_CHECK(rois.device().is_cpu(), "rois must be a CPU tensor"); at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2}; at::CheckedFrom c = "roi_pool_forward_cpu"; at::checkAllSameType(c, {input_t, rois_t}); int num_rois = rois.size(0); int channels = input.size(1); int height = input.size(2); int width = input.size(3); at::Tensor output = at::zeros( {num_rois, channels, pooled_height, pooled_width}, input.options()); at::Tensor argmax = at::zeros( {num_rois, channels, pooled_height, pooled_width}, input.options().dtype(at::kInt)); if (output.numel() == 0) { return std::make_tuple(output, argmax); } auto input_ = input.contiguous(), rois_ = rois.contiguous(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "roi_pool_forward_cpu", [&] { roi_pool_forward_kernel_impl( input_.data_ptr(), spatial_scale, channels, height, width, pooled_height, pooled_width, rois_.data_ptr(), num_rois, output.data_ptr(), argmax.data_ptr()); }); return std::make_tuple(output, argmax); } at::Tensor roi_pool_backward_cpu( const at::Tensor& grad, const at::Tensor& rois, const at::Tensor& argmax, double spatial_scale, int64_t pooled_height, int64_t pooled_width, int64_t batch_size, int64_t channels, int64_t height, int64_t width) { // Check if input tensors are CPU tensors TORCH_CHECK(grad.device().is_cpu(), "grad must be a CPU tensor"); TORCH_CHECK(rois.device().is_cpu(), "rois must be a CPU tensor"); TORCH_CHECK(argmax.device().is_cpu(), "argmax must be a CPU tensor"); TORCH_CHECK( rois.size(1) == 5, "Tensor rois should have shape as Tensor[K, 5]"); at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2}; at::CheckedFrom c = "roi_pool_backward_cpu"; at::checkAllSameType(c, {grad_t, rois_t}); auto num_rois = rois.size(0); at::Tensor grad_input = at::zeros({batch_size, channels, height, width}, grad.options()); // handle possibly empty gradients if (grad.numel() == 0) { return grad_input; } // get stride values to ensure indexing into gradients is correct. int n_stride = grad.stride(0); int c_stride = grad.stride(1); int h_stride = grad.stride(2); int w_stride = grad.stride(3); auto rois_ = rois.contiguous(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad.scalar_type(), "roi_pool_backward_cpu", [&] { roi_pool_backward_kernel_impl( grad.data_ptr(), argmax.data_ptr(), num_rois, channels, height, width, pooled_height, pooled_width, grad_input.data_ptr(), rois_.data_ptr(), n_stride, c_stride, h_stride, w_stride); }); return grad_input; } } // namespace ops } // namespace vision