# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch

_USING_PARROTS = True
try:
    from parrots.autograd import gradcheck
except ImportError:
    from torch.autograd import gradcheck
    _USING_PARROTS = False

# yapf:disable
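# Each entry in `inputs` pairs a feature map with RoIs given as
# [batch_idx, x1, y1, x2, y2]; the matching entry in `outputs` holds the
# expected pooled values and the expected gradient w.r.t. the input.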
inputs = [([[[[1., 2.], [3., 4.]]]],
           [[0., 0., 0., 1., 1.]]),
          ([[[[1., 2.], [3., 4.]],
             [[4., 3.], [2., 1.]]]],
           [[0., 0., 0., 1., 1.]]),
          ([[[[1., 2., 5., 6.], [3., 4., 7., 8.],
              [9., 10., 13., 14.], [11., 12., 15., 16.]]]],
           [[0., 0., 0., 3., 3.]])]
outputs = [([[[[1.0, 1.25], [1.5, 1.75]]]],
            [[[[3.0625, 0.4375], [0.4375, 0.0625]]]]),
           ([[[[1.0, 1.25], [1.5, 1.75]],
              [[4.0, 3.75], [3.5, 3.25]]]],
            [[[[3.0625, 0.4375], [0.4375, 0.0625]],
              [[3.0625, 0.4375], [0.4375, 0.0625]]]]),
           ([[[[1.9375, 4.75], [7.5625, 10.375]]]],
            [[[[0.47265625, 0.42968750, 0.42968750, 0.04296875],
               [0.42968750, 0.39062500, 0.39062500, 0.03906250],
               [0.42968750, 0.39062500, 0.39062500, 0.03906250],
               [0.04296875, 0.03906250, 0.03906250, 0.00390625]]]])]
# yapf:enable

pool_h = 2
pool_w = 2
spatial_scale = 1.0
sampling_ratio = 2


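# Numerical gradient check of the RoIAlign module via autograd gradcheck.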
def _test_roialign_gradcheck(device, dtype):
    if not torch.cuda.is_available() and device == 'cuda':
        pytest.skip('test requires GPU')
    try:
        from mmcv.ops import RoIAlign
    except ModuleNotFoundError:
        pytest.skip('RoIAlign op is not successfully compiled')
    if dtype is torch.half:
        pytest.skip('grad check does not support fp16')
    for case in inputs:
        np_input = np.array(case[0])
        np_rois = np.array(case[1])

        x = torch.tensor(
            np_input, dtype=dtype, device=device, requires_grad=True)
        rois = torch.tensor(np_rois, dtype=dtype, device=device)

        froipool = RoIAlign((pool_h, pool_w), spatial_scale, sampling_ratio)

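        # parrots and torch expose gradcheck with different keyword arguments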
        if torch.__version__ == 'parrots':
            gradcheck(
                froipool, (x, rois), no_grads=[rois], delta=1e-5, pt_atol=1e-5)
        else:
            gradcheck(froipool, (x, rois), eps=1e-5, atol=1e-5)


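# Forward/backward check of roi_align against the precomputed `outputs`.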
def _test_roialign_allclose(device, dtype):
    if not torch.cuda.is_available() and device == 'cuda':
        pytest.skip('test requires GPU')
    try:
        from mmcv.ops import roi_align
    except ModuleNotFoundError:
        pytest.skip('test requires compilation')
    pool_h = 2
    pool_w = 2
    spatial_scale = 1.0
    sampling_ratio = 2

    for case, output in zip(inputs, outputs):
        np_input = np.array(case[0])
        np_rois = np.array(case[1])
        np_output = np.array(output[0])
        np_grad = np.array(output[1])

        x = torch.tensor(
            np_input, dtype=dtype, device=device, requires_grad=True)
        rois = torch.tensor(np_rois, dtype=dtype, device=device)

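        # remaining positional args: output size, spatial scale, sampling
        # ratio, pool mode ('avg') and aligned (True)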
        output = roi_align(x, rois, (pool_h, pool_w), spatial_scale,
                           sampling_ratio, 'avg', True)
        output.backward(torch.ones_like(output))
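        # both the pooled output and the input gradient must match the
        # precomputed references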
        assert np.allclose(
            output.data.type(torch.float).cpu().numpy(), np_output, atol=1e-3)
        assert np.allclose(
            x.grad.data.type(torch.float).cpu().numpy(), np_grad, atol=1e-3)


@pytest.mark.parametrize('device', ['cuda', 'cpu'])
@pytest.mark.parametrize('dtype', [torch.float, torch.double, torch.half])
def test_roialign(device, dtype):
    # run gradcheck only in double precision
    if dtype is torch.double:
        _test_roialign_gradcheck(device=device, dtype=dtype)
    _test_roialign_allclose(device=device, dtype=dtype)