# Copyright (c) OpenMMLab. All rights reserved.
"""
CommandLine:
    pytest tests/test_utils/test_anchors.py
    xdoctest tests/test_utils/test_anchors.py zero
"""
import torch

from mmdet3d.core.anchor import build_prior_generator


def test_anchor_3d_range_generator():
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    anchor_generator_cfg = dict(
        type='Anchor3DRangeGenerator',
        ranges=[
            [0, -39.68, -0.6, 70.4, 39.68, -0.6],
            [0, -39.68, -0.6, 70.4, 39.68, -0.6],
            [0, -39.68, -1.78, 70.4, 39.68, -1.78],
        ],
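        # each range is [x_min, y_min, z_min, x_max, y_max, z_max]; equal z
        # bounds place every anchor of that class on a single height plane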
        sizes=[[0.8, 0.6, 1.73], [1.76, 0.6, 1.73], [3.9, 1.6, 1.56]],
        rotations=[0, 1.57],
        reshape_out=False)

    anchor_generator = build_prior_generator(anchor_generator_cfg)
    repr_str = repr(anchor_generator)
    expected_repr_str = 'Anchor3DRangeGenerator(anchor_range=' \
                        '[[0, -39.68, -0.6, 70.4, 39.68, -0.6], ' \
                        '[0, -39.68, -0.6, 70.4, 39.68, -0.6], ' \
                        '[0, -39.68, -1.78, 70.4, 39.68, -1.78]],' \
                        '\nscales=[1],\nsizes=[[0.8, 0.6, 1.73], ' \
                        '[1.76, 0.6, 1.73], [3.9, 1.6, 1.56]],' \
                        '\nrotations=[0, 1.57],\nreshape_out=False,' \
                        '\nsize_per_range=True)'
    assert repr_str == expected_repr_str
    featmap_size = (256, 256)
    mr_anchors = anchor_generator.single_level_grid_anchors(
        featmap_size, 1.1, device=device)
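    # without reshape_out the anchors keep a structured layout: here
    # (1, 256, 256, 3, 2, 7) covers 3 size/range pairs and 2 rotations per
    # location, with 7 values encoding each anchor's center, size and yaw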
    assert mr_anchors.shape == torch.Size([1, 256, 256, 3, 2, 7])


def test_aligned_anchor_generator():
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    anchor_generator_cfg = dict(
        type='AlignedAnchor3DRangeGenerator',
        ranges=[[-51.2, -51.2, -1.80, 51.2, 51.2, -1.80]],
        scales=[1, 2, 4],
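        # scales multiply the base anchor sizes, one scale per feature level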
        sizes=[
            [2.5981, 0.8660, 1.],  # 1.5/sqrt(3)
            [1.7321, 0.5774, 1.],  # 1/sqrt(3)
            [1., 1., 1.],
            [0.4, 0.4, 1],
        ],
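        # custom_values append two extras (e.g. velocity) to each 7-dim box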
        custom_values=[0, 0],
        rotations=[0, 1.57],
        size_per_range=False,
        reshape_out=True)

    featmap_sizes = [(256, 256), (128, 128), (64, 64)]
    anchor_generator = build_prior_generator(anchor_generator_cfg)
    assert anchor_generator.num_base_anchors == 8
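    # 8 base anchors per location: len(sizes) = 4 times len(rotations) = 2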

    # check base anchors
    expected_grid_anchors = [
        torch.tensor([[
            -51.0000, -51.0000, -1.8000, 2.5981, 0.8660, 1.0000, 0.0000,
            0.0000, 0.0000
        ],
                      [
                          -51.0000, -51.0000, -1.8000, 0.4000, 0.4000, 1.0000,
                          1.5700, 0.0000, 0.0000
                      ],
                      [
                          -50.6000, -51.0000, -1.8000, 0.4000, 0.4000, 1.0000,
                          0.0000, 0.0000, 0.0000
                      ],
                      [
                          -50.2000, -51.0000, -1.8000, 1.0000, 1.0000, 1.0000,
                          1.5700, 0.0000, 0.0000
                      ],
                      [
                          -49.8000, -51.0000, -1.8000, 1.0000, 1.0000, 1.0000,
                          0.0000, 0.0000, 0.0000
                      ],
                      [
                          -49.4000, -51.0000, -1.8000, 1.7321, 0.5774, 1.0000,
                          1.5700, 0.0000, 0.0000
                      ],
                      [
                          -49.0000, -51.0000, -1.8000, 1.7321, 0.5774, 1.0000,
                          0.0000, 0.0000, 0.0000
                      ],
                      [
                          -48.6000, -51.0000, -1.8000, 2.5981, 0.8660, 1.0000,
                          1.5700, 0.0000, 0.0000
                      ]],
                     device=device),
        torch.tensor([[
            -50.8000, -50.8000, -1.8000, 5.1962, 1.7320, 2.0000, 0.0000,
            0.0000, 0.0000
        ],
                      [
                          -50.8000, -50.8000, -1.8000, 0.8000, 0.8000, 2.0000,
                          1.5700, 0.0000, 0.0000
                      ],
                      [
                          -50.0000, -50.8000, -1.8000, 0.8000, 0.8000, 2.0000,
                          0.0000, 0.0000, 0.0000
                      ],
                      [
                          -49.2000, -50.8000, -1.8000, 2.0000, 2.0000, 2.0000,
                          1.5700, 0.0000, 0.0000
                      ],
                      [
                          -48.4000, -50.8000, -1.8000, 2.0000, 2.0000, 2.0000,
                          0.0000, 0.0000, 0.0000
                      ],
                      [
                          -47.6000, -50.8000, -1.8000, 3.4642, 1.1548, 2.0000,
                          1.5700, 0.0000, 0.0000
                      ],
                      [
                          -46.8000, -50.8000, -1.8000, 3.4642, 1.1548, 2.0000,
                          0.0000, 0.0000, 0.0000
                      ],
                      [
                          -46.0000, -50.8000, -1.8000, 5.1962, 1.7320, 2.0000,
                          1.5700, 0.0000, 0.0000
                      ]],
                     device=device),
        torch.tensor([[
            -50.4000, -50.4000, -1.8000, 10.3924, 3.4640, 4.0000, 0.0000,
            0.0000, 0.0000
        ],
                      [
                          -50.4000, -50.4000, -1.8000, 1.6000, 1.6000, 4.0000,
                          1.5700, 0.0000, 0.0000
                      ],
                      [
                          -48.8000, -50.4000, -1.8000, 1.6000, 1.6000, 4.0000,
                          0.0000, 0.0000, 0.0000
                      ],
                      [
                          -47.2000, -50.4000, -1.8000, 4.0000, 4.0000, 4.0000,
                          1.5700, 0.0000, 0.0000
                      ],
                      [
                          -45.6000, -50.4000, -1.8000, 4.0000, 4.0000, 4.0000,
                          0.0000, 0.0000, 0.0000
                      ],
                      [
                          -44.0000, -50.4000, -1.8000, 6.9284, 2.3096, 4.0000,
                          1.5700, 0.0000, 0.0000
                      ],
                      [
                          -42.4000, -50.4000, -1.8000, 6.9284, 2.3096, 4.0000,
                          0.0000, 0.0000, 0.0000
                      ],
                      [
                          -40.8000, -50.4000, -1.8000, 10.3924, 3.4640, 4.0000,
                          1.5700, 0.0000, 0.0000
                      ]],
                     device=device)
    ]
    multi_level_anchors = anchor_generator.grid_anchors(
        featmap_sizes, device=device)
    expected_multi_level_shapes = [
        torch.Size([524288, 9]),
        torch.Size([131072, 9]),
        torch.Size([32768, 9])
    ]
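    # each level flattens to feat_h * feat_w * 8 anchors of 9 values,
    # e.g. 256 * 256 * 8 = 524288 at the finest level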
    for i, single_level_anchor in enumerate(multi_level_anchors):
        assert single_level_anchor.shape == expected_multi_level_shapes[i]
        # slice [:56:7] so the check covers all 8 (len(sizes) *
        # len(rotations)) base anchors, each taken at a different location
        assert single_level_anchor[:56:7].allclose(expected_grid_anchors[i])


def test_aligned_anchor_generator_per_cls():
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    anchor_generator_cfg = dict(
        type='AlignedAnchor3DRangeGeneratorPerCls',
        ranges=[[-100, -100, -1.80, 100, 100, -1.80],
                [-100, -100, -1.30, 100, 100, -1.30]],
        sizes=[[1.76, 0.63, 1.44], [2.35, 0.96, 1.59]],
        custom_values=[0, 0],
        rotations=[0, 1.57],
        reshape_out=False)

    featmap_sizes = [(100, 100), (50, 50)]
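    # the per-class generator pairs one featmap size with each class, so the
    # two classes are anchored on feature maps of different resolutions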
    anchor_generator = build_prior_generator(anchor_generator_cfg)

    # check base anchors
    expected_grid_anchors = [[
        torch.tensor([[
            -99.0000, -99.0000, -1.8000, 1.7600, 0.6300, 1.4400, 0.0000,
            0.0000, 0.0000
        ],
                      [
                          -99.0000, -99.0000, -1.8000, 1.7600, 0.6300, 1.4400,
                          1.5700, 0.0000, 0.0000
                      ]],
                     device=device),
        torch.tensor([[
            -98.0000, -98.0000, -1.3000, 2.3500, 0.9600, 1.5900, 0.0000,
            0.0000, 0.0000
        ],
                      [
                          -98.0000, -98.0000, -1.3000, 2.3500, 0.9600, 1.5900,
                          1.5700, 0.0000, 0.0000
                      ]],
                     device=device)
    ]]
    multi_level_anchors = anchor_generator.grid_anchors(
        featmap_sizes, device=device)
    expected_multi_level_shapes = [[
        torch.Size([20000, 9]), torch.Size([5000, 9])
    ]]
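    # anchor counts per class: 100 * 100 * 2 = 20000 and 50 * 50 * 2 = 5000
    # (locations * rotations, with a single size per class)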
    for i, single_level_anchor in enumerate(multi_level_anchors):
        assert len(single_level_anchor) == len(expected_multi_level_shapes[i])
        # slice [:2 * interval:interval] so the check covers both
        # (len(sizes) * len(rotations)) anchors at 2 different locations;
        # note that len(sizes) per class is always 1 in this case
        for j in range(len(single_level_anchor)):
            interval = int(expected_multi_level_shapes[i][j][0] / 2)
            assert single_level_anchor[j][:2 * interval:interval].allclose(
                expected_grid_anchors[i][j])
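

def _anchor_generator_usage_sketch():
    # Minimal usage sketch, deliberately underscore-prefixed so pytest does
    # not collect it: it only relies on the API already exercised above
    # (build_prior_generator and grid_anchors) with a hypothetical
    # single-class, KITTI-style car anchor config.
    anchor_generator_cfg = dict(
        type='Anchor3DRangeGenerator',
        ranges=[[0, -39.68, -1.78, 70.4, 39.68, -1.78]],
        sizes=[[3.9, 1.6, 1.56]],
        rotations=[0, 1.57],
        reshape_out=True)
    anchor_generator = build_prior_generator(anchor_generator_cfg)
    # reshape_out=True flattens each level to (num_anchors, 7), where
    # num_anchors = feat_h * feat_w * len(sizes) * len(rotations)
    anchors = anchor_generator.grid_anchors([(248, 216)], device='cpu')
    assert anchors[0].shape == torch.Size([248 * 216 * 2, 7])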