# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch

from mmcv.utils import IS_MLU_AVAILABLE, TORCH_VERSION, digit_version

try:
    # If PyTorch version >= 1.6.0 and fp16 is enabled, torch.cuda.amp.autocast
    # would be imported and used; we should test if our modules support it.
    from torch.cuda.amp import autocast
except ImportError:
    pass

input = [[[[1., 2., 3.], [0., 1., 2.], [3., 5., 2.]]]]
offset_weight = [[[0.1, 0.4, 0.6, 0.1]], [[0.3, 0.2, 0.1, 0.3]],
                 [[0.5, 0.5, 0.2, 0.8]], [[0.8, 0.3, 0.9, 0.1]],
                 [[0.3, 0.1, 0.2, 0.5]], [[0.3, 0.7, 0.5, 0.3]],
                 [[0.6, 0.2, 0.5, 0.3]], [[0.4, 0.1, 0.8, 0.4]]]
offset_bias = [0.7, 0.1, 0.8, 0.5, 0.6, 0.5, 0.4, 0.7]
deform_weight = [[[0.4, 0.2, 0.1, 0.9]]]
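# NOTE: the offset branch predicts 2 * kernel_h * kernel_w = 2 * 2 * 2 = 8
# channels (one (x, y) offset per sampling location of the 2x2 kernel),
# hence the eight offset filters and biases above.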

gt_out = [[[[1.650, 0.], [0.000, 0.]]]]
gt_x_grad = [[[[-0.666, 0.204, 0.000], [0.030, -0.416, 0.012],
               [0.000, 0.252, 0.129]]]]
gt_offset_weight_grad = [[[[1.44, 2.88], [0.00, 1.44]]],
                         [[[-0.72, -1.44], [0.00, -0.72]]],
                         [[[0.00, 0.00], [0.00, 0.00]]],
                         [[[0.00, 0.00], [0.00, 0.00]]],
                         [[[-0.10, -0.20], [0.00, -0.10]]],
                         [[[-0.08, -0.16], [0.00, -0.08]]],
                         [[[-0.54, -1.08], [0.00, -0.54]]],
                         [[[-0.54, -1.08], [0.00, -0.54]]]]
gt_offset_bias_grad = [1.44, -0.72, 0., 0., -0.10, -0.08, -0.54, -0.54]
gt_deform_weight_grad = [[[[3.62, 0.], [0.40, 0.18]]]]
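

# A minimal sketch (not invoked by the tests) of how the hard-coded
# reference values above could be regenerated, assuming the CPU
# implementation of DeformConv2dPack; when batched input is used, the
# parameter gradients must additionally be divided by the batch size.
def _regenerate_reference_outputs():
    from mmcv.ops import DeformConv2dPack
    model = DeformConv2dPack(
        in_channels=1, out_channels=1, kernel_size=2, stride=1, padding=0)
    model.conv_offset.weight.data = torch.Tensor(offset_weight).reshape(
        8, 1, 2, 2)
    model.conv_offset.bias.data = torch.Tensor(offset_bias).reshape(8)
    model.weight.data = torch.Tensor(deform_weight).reshape(1, 1, 2, 2)
    x = torch.tensor(input, dtype=torch.float, requires_grad=True)
    out = model(x)
    out.backward(torch.ones_like(out))
    return out.detach(), x.grad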


class TestDeformconv:

    def _test_deformconv(self,
                         dtype=torch.float,
                         threshold=1e-3,
                         device='cuda',
                         batch_size=10,
                         im2col_step=2):
        if not torch.cuda.is_available() and device == 'cuda':
            pytest.skip('test requires GPU')
        if device == 'mlu':
            from mmcv.ops import DeformConv2dPack_MLU as DeformConv2dPack
        else:
            from mmcv.ops import DeformConv2dPack
        c_in = 1
        c_out = 1
        repeated_input = np.repeat(input, batch_size, axis=0)
        repeated_gt_out = np.repeat(gt_out, batch_size, axis=0)
        repeated_gt_x_grad = np.repeat(gt_x_grad, batch_size, axis=0)
        x = torch.tensor(repeated_input, device=device, dtype=dtype)
        x.requires_grad = True
        model = DeformConv2dPack(
            in_channels=c_in,
            out_channels=c_out,
            kernel_size=2,
            stride=1,
            padding=0,
            im2col_step=im2col_step)
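        # Pin the offset and deformable weights/biases so the hard-coded
        # reference outputs and gradients above apply.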
        model.conv_offset.weight.data = torch.nn.Parameter(
            torch.Tensor(offset_weight).reshape(8, 1, 2, 2))
        model.conv_offset.bias.data = torch.nn.Parameter(
            torch.Tensor(offset_bias).reshape(8))
        model.weight.data = torch.nn.Parameter(
            torch.Tensor(deform_weight).reshape(1, 1, 2, 2))
        if device == 'cuda':
            model.cuda()
        elif device == 'mlu':
            model.mlu()
        model.type(dtype)

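        # Backpropagating all-ones is equivalent to differentiating out.sum().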
        out = model(x)
        out.backward(torch.ones_like(out))

        assert np.allclose(out.data.detach().cpu().numpy(), repeated_gt_out,
                           threshold)
        assert np.allclose(x.grad.detach().cpu().numpy(), repeated_gt_x_grad,
                           threshold)
        # The input is repeated along the batch dimension, which scales the
        # accumulated parameter gradients, so divide by batch_size before
        # comparing.
        assert np.allclose(
            model.conv_offset.weight.grad.detach().cpu().numpy() / batch_size,
            gt_offset_weight_grad, threshold)
        assert np.allclose(
            model.conv_offset.bias.grad.detach().cpu().numpy() / batch_size,
            gt_offset_bias_grad, threshold)
        assert np.allclose(
            model.weight.grad.detach().cpu().numpy() / batch_size,
            gt_deform_weight_grad, threshold)

        from mmcv.ops import DeformConv2d

        # test bias
        model = DeformConv2d(1, 1, 2, stride=1, padding=0)
        assert not hasattr(model, 'bias')
        # test bias=True
        with pytest.raises(AssertionError):
            model = DeformConv2d(1, 1, 2, stride=1, padding=0, bias=True)
        # test in_channels % group != 0
        with pytest.raises(AssertionError):
            model = DeformConv2d(3, 2, 3, groups=2)
        # test out_channels % group != 0
        with pytest.raises(AssertionError):
            model = DeformConv2d(3, 4, 3, groups=3)

    def _test_amp_deformconv(self,
                             input_dtype,
                             threshold=1e-3,
                             device='cuda',
                             batch_size=10,
                             im2col_step=2):
        """The function to test amp released on pytorch 1.6.0.

        The type of input data might be torch.float or torch.half,
        so we should test deform_conv in both cases. With amp, the
        data type of model will NOT be set manually.

        Args:
            input_dtype: torch.float or torch.half.
            threshold: the same as above function.
        """
        if not torch.cuda.is_available() and device == 'cuda':
            return
        if device == 'mlu':
            from mmcv.ops import DeformConv2dPack_MLU as DeformConv2dPack
        else:
            from mmcv.ops import DeformConv2dPack
        c_in = 1
        c_out = 1
        repeated_input = np.repeat(input, batch_size, axis=0)
        repeated_gt_out = np.repeat(gt_out, batch_size, axis=0)
        repeated_gt_x_grad = np.repeat(gt_x_grad, batch_size, axis=0)
        x = torch.Tensor(repeated_input).to(device).type(input_dtype)
        x.requires_grad = True
        model = DeformConv2dPack(
            in_channels=c_in,
            out_channels=c_out,
            kernel_size=2,
            stride=1,
            padding=0,
            im2col_step=im2col_step)
        model.conv_offset.weight.data = torch.nn.Parameter(
            torch.Tensor(offset_weight).reshape(8, 1, 2, 2))
        model.conv_offset.bias.data = torch.nn.Parameter(
            torch.Tensor(offset_bias).reshape(8))
        model.weight.data = torch.nn.Parameter(
            torch.Tensor(deform_weight).reshape(1, 1, 2, 2))
        if device == 'cuda':
            model.cuda()
        elif device == 'mlu':
            model.mlu()

        out = model(x)
        out.backward(torch.ones_like(out))

        assert np.allclose(out.data.detach().cpu().numpy(), repeated_gt_out,
                           threshold)
        assert np.allclose(x.grad.detach().cpu().numpy(), repeated_gt_x_grad,
                           threshold)
        assert np.allclose(
            model.conv_offset.weight.grad.detach().cpu().numpy() / batch_size,
            gt_offset_weight_grad, threshold)
        assert np.allclose(
            model.conv_offset.bias.grad.detach().cpu().numpy() / batch_size,
            gt_offset_bias_grad, threshold)
        assert np.allclose(
            model.weight.grad.detach().cpu().numpy() / batch_size,
            gt_deform_weight_grad, threshold)

        from mmcv.ops import DeformConv2d

        # test bias
        model = DeformConv2d(1, 1, 2, stride=1, padding=0)
        assert not hasattr(model, 'bias')
        # test bias=True
        with pytest.raises(AssertionError):
            model = DeformConv2d(1, 1, 2, stride=1, padding=0, bias=True)
        # test in_channels % group != 0
        with pytest.raises(AssertionError):
            model = DeformConv2d(3, 2, 3, groups=2)
        # test out_channels % group != 0
        with pytest.raises(AssertionError):
            model = DeformConv2d(3, 4, 3, groups=3)

    def test_deformconv(self):
        self._test_deformconv(torch.double, device='cpu')
        self._test_deformconv(torch.float, device='cpu', threshold=1e-1)

        device = 'mlu' if IS_MLU_AVAILABLE else 'cuda'
        self._test_deformconv(torch.double, device=device)
        self._test_deformconv(torch.float, device=device)
        self._test_deformconv(torch.half, threshold=1e-1, device=device)
        # test batch_size < im2col_step
        self._test_deformconv(
            torch.float, batch_size=1, im2col_step=2, device=device)
        # test batch_size % im2col_step != 0
        with pytest.raises(
                AssertionError,
                match='batch size must be divisible by im2col_step'):
            self._test_deformconv(
                torch.float, batch_size=10, im2col_step=3, device=device)

        # test amp when torch version >= '1.6.0'; the input dtype for
        # deformconv may be torch.float or torch.half
        if (TORCH_VERSION != 'parrots'
                and digit_version(TORCH_VERSION) >= digit_version('1.6.0')):
            with autocast(enabled=True):
                self._test_amp_deformconv(torch.float, 1e-1, device)
                self._test_amp_deformconv(torch.half, 1e-1, device)
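
# These tests are collected by pytest; to run just this module locally one
# might use, e.g. (the repository-relative path is illustrative):
#   pytest tests/test_ops/test_deform_conv.py::TestDeformconv -v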