Unverified Commit 2e1e0b63 authored by Francisco Massa, committed by GitHub

Fix RoIAlign and RoIPool for non-contiguous gradients (#920)

parent 12d2c737
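The backward kernels changed below read the incoming gradient through a raw pointer from data<scalar_t>(), so they presumably assume a standard contiguous NCHW layout; when autograd hands them a non-contiguous gradient, the elements are read in the wrong order, which this commit fixes by calling .contiguous() first. A minimal sketch of how such a gradient can arise, mirroring the construction used in the new test below (plain PyTorch, illustrative only):

    import torch

    # grad holds exactly the same values as grad_cont, but permuting out,
    # materializing that layout, and permuting back leaves non-standard
    # strides, so its storage order no longer matches the logical NCHW order.
    grad_cont = torch.rand(3, 1, 5, 5)
    grad = grad_cont.permute(2, 1, 3, 0).contiguous().permute(3, 1, 0, 2)

    assert torch.equal(grad, grad_cont)   # same values
    assert not grad.is_contiguous()       # different memory layout
    print(grad_cont.stride())             # (25, 25, 5, 1)
    print(grad.stride())                  # (1, 15, 15, 3)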
@@ -135,6 +135,41 @@ class RoIPoolTester(unittest.TestCase):
         assert torch.allclose(x.grad, gt_grad), 'gradient incorrect for roi_pool'
+    def test_roi_pool_align_non_cont_grad_cpu(self):
+        devices = ['cpu']
+        if torch.cuda.is_available():
+            devices.append('cuda')
+        for d in devices:
+            device = torch.device(d)
+            rois = torch.tensor([
+                [0, 0, 0, 9, 9],
+                [0, 0, 5, 5, 9],
+                [0, 5, 5, 9, 9]], dtype=self.dtype, device=device)
+            grad_cont = torch.rand(3, 1, 5, 5, dtype=self.dtype, device=device)
+            grad = grad_cont.permute(2, 1, 3, 0).contiguous().permute(3, 1, 0, 2)
+            for op in ['RoIPool', 'RoIAlign']:
+                x = torch.rand(1, 1, 10, 10, dtype=self.dtype, device=device, requires_grad=True)
+                kwargs = {}
+                if op == 'RoIAlign':
+                    kwargs['sampling_ratio'] = 1
+                m = getattr(ops, op)((5, 5), 1, **kwargs)
+                y = m(x, rois)
+                y.backward(grad_cont)
+                g1 = x.grad.detach().clone()
+                del x.grad
+                y = m(x, rois)
+                y.backward(grad)
+                g2 = x.grad.detach().clone()
+                del x.grad
+                assert torch.allclose(g1, g2), 'gradient incorrect for {}'.format(op)
     def test_roi_pool_gradcheck_cpu(self):
         device = torch.device('cpu')
         x = torch.rand(1, 1, 10, 10, dtype=self.dtype, device=device, requires_grad=True)
...
@@ -456,7 +456,7 @@ at::Tensor ROIAlign_backward_cpu(
   AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.type(), "ROIAlign_forward", [&] {
     ROIAlignBackward<scalar_t>(
         grad.numel(),
-        grad.data<scalar_t>(),
+        grad.contiguous().data<scalar_t>(),
         spatial_scale,
         channels,
         height,
...
@@ -205,7 +205,7 @@ at::Tensor ROIPool_backward_cpu(
   AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.type(), "ROIPool_backward", [&] {
     RoIPoolBackward<scalar_t>(
-        grad.data<scalar_t>(),
+        grad.contiguous().data<scalar_t>(),
         argmax.data<int>(),
         num_rois,
         channels,
...
@@ -396,7 +396,7 @@ at::Tensor ROIAlign_backward_cuda(
   AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.type(), "ROIAlign_backward", [&] {
     RoIAlignBackward<scalar_t><<<grid, block, 0, stream>>>(
         grad.numel(),
-        grad.data<scalar_t>(),
+        grad.contiguous().data<scalar_t>(),
         spatial_scale,
         channels,
         height,
...
@@ -221,7 +221,7 @@ at::Tensor ROIPool_backward_cuda(
   AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.type(), "ROIPool_backward", [&] {
     RoIPoolBackward<scalar_t><<<grid, block, 0, stream>>>(
         grad.numel(),
-        grad.data<scalar_t>(),
+        grad.contiguous().data<scalar_t>(),
         argmax.contiguous().data<int>(),
         num_rois,
         spatial_scale,
...
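All four hunks apply the same one-line change. For what it's worth, the extra .contiguous() call should be effectively free in the common case: PyTorch returns the tensor itself when it is already contiguous and only copies when the layout actually differs, e.g.:

    import torch

    g = torch.rand(3, 1, 5, 5)
    assert g.contiguous() is g             # already contiguous: no copy
    g_nc = g.permute(0, 1, 3, 2)
    assert g_nc.contiguous() is not g_nc   # non-contiguous: materializes a copy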