#pragma once

#include "cpu/vision_cpu.h"

#ifdef WITH_CUDA
#include "cuda/vision_cuda.h"
#endif

// Dispatches ROI pooling to the CUDA or CPU implementation based on the
// device of the input tensor. Returns the pooled output together with the
// argmax indices needed by the backward pass.
std::tuple<at::Tensor, at::Tensor> ROIPool_forward(
    const at::Tensor& input,
    const at::Tensor& rois,
    const double spatial_scale,
    const int64_t pooled_height,
    const int64_t pooled_width) {
  if (input.type().is_cuda()) {
#ifdef WITH_CUDA
    return ROIPool_forward_cuda(
        input, rois, spatial_scale, pooled_height, pooled_width);
#else
    AT_ERROR("Not compiled with GPU support");
#endif
  }
  return ROIPool_forward_cpu(
      input, rois, spatial_scale, pooled_height, pooled_width);
}

// Backward pass: routes the incoming gradient to the CUDA or CPU kernel,
// using the argmax indices recorded during the forward pass.
at::Tensor ROIPool_backward(
    const at::Tensor& grad,
    const at::Tensor& rois,
    const at::Tensor& argmax,
    const float spatial_scale,
    const int pooled_height,
    const int pooled_width,
    const int batch_size,
    const int channels,
    const int height,
    const int width) {
  if (grad.type().is_cuda()) {
#ifdef WITH_CUDA
    return ROIPool_backward_cuda(
        grad,
        rois,
        argmax,
        spatial_scale,
        pooled_height,
        pooled_width,
        batch_size,
        channels,
        height,
        width);
#else
    AT_ERROR("Not compiled with GPU support");
#endif
  }
  return ROIPool_backward_cpu(
      grad,
      rois,
      argmax,
      spatial_scale,
      pooled_height,
      pooled_width,
      batch_size,
      channels,
      height,
      width);
}
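
// Illustrative sketch (not part of this header): how a dispatcher like
// ROIPool_forward is typically exposed to Python from a separate binding
// translation unit. The module setup, function names, and file layout below
// are assumptions for illustration only.
//
// // vision.cpp (hypothetical binding file)
// #include <torch/extension.h>
// #include "ROIPool.h"
//
// PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
//   m.def("roi_pool_forward", &ROIPool_forward, "ROIPool forward");
//   m.def("roi_pool_backward", &ROIPool_backward, "ROIPool backward");
// }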