# Copyright (c) OpenMMLab. All rights reserved.
import unittest

import numpy as np
import pytest
import torch

from mmdet3d.core.bbox import (BaseInstance3DBoxes, Box3DMode,
                               CameraInstance3DBoxes, DepthInstance3DBoxes,
                               LiDARInstance3DBoxes, bbox3d2roi,
                               bbox3d_mapping_back)
from mmdet3d.core.bbox.structures.utils import (get_box_type, limit_period,
                                                points_cam2img,
                                                rotation_3d_in_axis,
                                                xywhr2xyxyr)
from mmdet3d.core.points import CameraPoints, DepthPoints, LiDARPoints


def test_bbox3d_mapping_back():
    bboxes = BaseInstance3DBoxes(
        [[
            -5.24223238e+00, 4.00209696e+01, 2.97570381e-01, 2.06200000e+00,
            4.40900000e+00, 1.54800000e+00, -1.48801203e+00
        ],
         [
             -2.66751588e+01, 5.59499564e+00, -9.14345860e-01, 3.43000000e-01,
             4.58000000e-01, 7.82000000e-01, -4.62759755e+00
         ],
         [
             -5.80979675e+00, 3.54092357e+01, 2.00889888e-01, 2.39600000e+00,
             3.96900000e+00, 1.73200000e+00, -4.65203216e+00
         ],
         [
             -3.13086877e+01, 1.09007628e+00, -1.94612112e-01, 1.94400000e+00,
             3.85700000e+00, 1.72300000e+00, -2.81427027e+00
         ]])
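    # bbox3d_mapping_back takes (bboxes, scale_factor, flip_horizontal,
    # flip_vertical) and undoes test-time augmentation on the boxes.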
    new_bboxes = bbox3d_mapping_back(bboxes, 1.1, True, True)
    expected_new_bboxes = torch.tensor(
        [[-4.7657, 36.3827, 0.2705, 1.8745, 4.0082, 1.4073, -1.4880],
         [-24.2501, 5.0864, -0.8312, 0.3118, 0.4164, 0.7109, -4.6276],
         [-5.2816, 32.1902, 0.1826, 2.1782, 3.6082, 1.5745, -4.6520],
         [-28.4624, 0.9910, -0.1769, 1.7673, 3.5064, 1.5664, -2.8143]])
    assert torch.allclose(new_bboxes.tensor, expected_new_bboxes, atol=1e-4)


def test_bbox3d2roi():
    bbox_0 = torch.tensor(
        [[-5.2422, 4.0020, 2.9757, 2.0620, 4.4090, 1.5480, -1.4880],
         [-5.8097, 3.5409, 2.0088, 2.3960, 3.9690, 1.7320, -4.6520]])
    bbox_1 = torch.tensor(
        [[-2.6675, 5.5949, -9.1434, 3.4300, 4.5800, 7.8200, -4.6275],
         [-3.1308, 1.0900, -1.9461, 1.9440, 3.8570, 1.7230, -2.8142]])
    bbox_list = [bbox_0, bbox_1]
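    # bbox3d2roi prepends the sample index to each box, merging the
    # per-sample (N_i, 7) tensors into a single (sum(N_i), 8) RoI tensor.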
    rois = bbox3d2roi(bbox_list)
    expected_rois = torch.tensor(
        [[0.0000, -5.2422, 4.0020, 2.9757, 2.0620, 4.4090, 1.5480, -1.4880],
         [0.0000, -5.8097, 3.5409, 2.0088, 2.3960, 3.9690, 1.7320, -4.6520],
         [1.0000, -2.6675, 5.5949, -9.1434, 3.4300, 4.5800, 7.8200, -4.6275],
         [1.0000, -3.1308, 1.0900, -1.9461, 1.9440, 3.8570, 1.7230, -2.8142]])
    assert torch.all(torch.eq(rois, expected_rois))


def test_base_boxes3d():
    # test empty initialization
    empty_boxes = []
    boxes = BaseInstance3DBoxes(empty_boxes)
    assert boxes.tensor.shape[0] == 0
    assert boxes.tensor.shape[1] == 7

    # Test init with origin
    gravity_center_box = np.array(
        [[
            -5.24223238e+00, 4.00209696e+01, 2.97570381e-01, 2.06200000e+00,
            4.40900000e+00, 1.54800000e+00, -1.48801203e+00
        ],
         [
             -2.66751588e+01, 5.59499564e+00, -9.14345860e-01, 3.43000000e-01,
             4.58000000e-01, 7.82000000e-01, -4.62759755e+00
         ],
         [
             -5.80979675e+00, 3.54092357e+01, 2.00889888e-01, 2.39600000e+00,
             3.96900000e+00, 1.73200000e+00, -4.65203216e+00
         ],
         [
             -3.13086877e+01, 1.09007628e+00, -1.94612112e-01, 1.94400000e+00,
             3.85700000e+00, 1.72300000e+00, -2.81427027e+00
         ]],
        dtype=np.float32)

    bottom_center_box = BaseInstance3DBoxes(
        gravity_center_box, origin=(0.5, 0.5, 0.5))
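    # origin=(0.5, 0.5, 0.5) declares the input boxes as gravity-centered;
    # the constructor shifts them to the default bottom-center origin
    # (0.5, 0.5, 0).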

    assert bottom_center_box.yaw.shape[0] == 4


def test_lidar_boxes3d():
    # test empty initialization
    empty_boxes = []
    boxes = LiDARInstance3DBoxes(empty_boxes)
    assert boxes.tensor.shape[0] == 0
    assert boxes.tensor.shape[1] == 7

    # Test init with origin
    gravity_center_box = np.array(
        [[
            -5.24223238e+00, 4.00209696e+01, 2.97570381e-01, 2.06200000e+00,
            4.40900000e+00, 1.54800000e+00, -1.48801203e+00
        ],
         [
             -2.66751588e+01, 5.59499564e+00, -9.14345860e-01, 3.43000000e-01,
             4.58000000e-01, 7.82000000e-01, -4.62759755e+00
         ],
         [
             -5.80979675e+00, 3.54092357e+01, 2.00889888e-01, 2.39600000e+00,
             3.96900000e+00, 1.73200000e+00, -4.65203216e+00
         ],
         [
             -3.13086877e+01, 1.09007628e+00, -1.94612112e-01, 1.94400000e+00,
             3.85700000e+00, 1.72300000e+00, -2.81427027e+00
         ]],
        dtype=np.float32)
    bottom_center_box = LiDARInstance3DBoxes(
        gravity_center_box, origin=(0.5, 0.5, 0.5))
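    # The gravity-centered inputs are shifted down by dz / 2 along z to the
    # bottom-center origin (0.5, 0.5, 0) used internally, as checked below.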
    expected_tensor = torch.tensor(
        [[
            -5.24223238e+00, 4.00209696e+01, -4.76429619e-01, 2.06200000e+00,
            4.40900000e+00, 1.54800000e+00, -1.48801203e+00
        ],
         [
             -2.66751588e+01, 5.59499564e+00, -1.30534586e+00, 3.43000000e-01,
             4.58000000e-01, 7.82000000e-01, -4.62759755e+00
         ],
         [
             -5.80979675e+00, 3.54092357e+01, -6.65110112e-01, 2.39600000e+00,
             3.96900000e+00, 1.73200000e+00, -4.65203216e+00
         ],
         [
             -3.13086877e+01, 1.09007628e+00, -1.05611211e+00, 1.94400000e+00,
             3.85700000e+00, 1.72300000e+00, -2.81427027e+00
         ]])
    assert torch.allclose(expected_tensor, bottom_center_box.tensor)

    # Test init with numpy array
    np_boxes = np.array(
        [[1.7802081, 2.516249, -1.7501148, 1.75, 3.39, 1.65, 1.48],
         [8.959413, 2.4567227, -1.6357126, 1.54, 4.01, 1.57, 1.62]],
        dtype=np.float32)
    boxes_1 = LiDARInstance3DBoxes(np_boxes)
    assert torch.allclose(boxes_1.tensor, torch.from_numpy(np_boxes))

    # test properties
    assert boxes_1.volume.size(0) == 2
    assert (boxes_1.center == boxes_1.bottom_center).all()
    assert repr(boxes) == (
        'LiDARInstance3DBoxes(\n    tensor([], size=(0, 7)))')

    # test init with torch.Tensor
    th_boxes = torch.tensor(
        [[
            28.29669987, -0.5557558, -1.30332506, 1.47000003, 2.23000002,
            1.48000002, -1.57000005
        ],
         [
             26.66901946, 21.82302134, -1.73605708, 1.55999994, 3.48000002,
             1.39999998, -1.69000006
         ],
         [
             31.31977974, 8.16214412, -1.62177875, 1.74000001, 3.76999998,
             1.48000002, 2.78999996
         ]],
        dtype=torch.float32)
    boxes_2 = LiDARInstance3DBoxes(th_boxes)
    assert torch.allclose(boxes_2.tensor, th_boxes)

    # test clone/to/device
    boxes_2 = boxes_2.clone()
    boxes_1 = boxes_1.to(boxes_2.device)

    # test box concatenation
    expected_tensor = torch.tensor(
        [[1.7802081, 2.516249, -1.7501148, 1.75, 3.39, 1.65, 1.48],
         [8.959413, 2.4567227, -1.6357126, 1.54, 4.01, 1.57, 1.62],
         [28.2967, -0.5557558, -1.303325, 1.47, 2.23, 1.48, -1.57],
         [26.66902, 21.82302, -1.736057, 1.56, 3.48, 1.4, -1.69],
         [31.31978, 8.162144, -1.6217787, 1.74, 3.77, 1.48, 2.79]])
    boxes = LiDARInstance3DBoxes.cat([boxes_1, boxes_2])
    assert torch.allclose(boxes.tensor, expected_tensor)
    # concatenate empty list
    empty_boxes = LiDARInstance3DBoxes.cat([])
    assert empty_boxes.tensor.shape[0] == 0
    assert empty_boxes.tensor.shape[-1] == 7

    # test box flip
    points = torch.tensor([[1.2559, -0.6762, -1.4658],
                           [4.7814, -0.8784, -1.3857],
                           [6.7053, 0.2517, -0.9697],
                           [0.6533, -0.5520, -0.5265],
                           [4.5870, 0.5358, -1.4741]])
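    # A 'horizontal' flip in LiDAR coordinates mirrors the y-axis: box y and
    # point y are negated, and box yaw becomes pi - yaw.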
    expected_tensor = torch.tensor(
        [[1.7802081, -2.516249, -1.7501148, 1.75, 3.39, 1.65, 1.6615927],
         [8.959413, -2.4567227, -1.6357126, 1.54, 4.01, 1.57, 1.5215927],
         [28.2967, 0.5557558, -1.303325, 1.47, 2.23, 1.48, 4.7115927],
         [26.66902, -21.82302, -1.736057, 1.56, 3.48, 1.4, 4.8315926],
         [31.31978, -8.162144, -1.6217787, 1.74, 3.77, 1.48, 0.35159278]])
    expected_points = torch.tensor([[1.2559, 0.6762, -1.4658],
                                    [4.7814, 0.8784, -1.3857],
                                    [6.7053, -0.2517, -0.9697],
                                    [0.6533, 0.5520, -0.5265],
                                    [4.5870, -0.5358, -1.4741]])
    points = boxes.flip('horizontal', points)
    assert torch.allclose(boxes.tensor, expected_tensor)
    assert torch.allclose(points, expected_points, 1e-3)

    expected_tensor = torch.tensor(
        [[-1.7802, -2.5162, -1.7501, 1.7500, 3.3900, 1.6500, -1.6616],
         [-8.9594, -2.4567, -1.6357, 1.5400, 4.0100, 1.5700, -1.5216],
         [-28.2967, 0.5558, -1.3033, 1.4700, 2.2300, 1.4800, -4.7116],
         [-26.6690, -21.8230, -1.7361, 1.5600, 3.4800, 1.4000, -4.8316],
         [-31.3198, -8.1621, -1.6218, 1.7400, 3.7700, 1.4800, -0.3516]])
    boxes_flip_vert = boxes.clone()
    points = boxes_flip_vert.flip('vertical', points)
    expected_points = torch.tensor([[-1.2559, 0.6762, -1.4658],
                                    [-4.7814, 0.8784, -1.3857],
                                    [-6.7053, -0.2517, -0.9697],
                                    [-0.6533, 0.5520, -0.5265],
                                    [-4.5870, -0.5358, -1.4741]])
    assert torch.allclose(boxes_flip_vert.tensor, expected_tensor, 1e-4)
    assert torch.allclose(points, expected_points)

    # test box rotation
    # with input torch.Tensor points and angle
    expected_tensor = torch.tensor(
        [[1.4225, -2.7344, -1.7501, 1.7500, 3.3900, 1.6500, 1.7976],
         [8.5435, -3.6491, -1.6357, 1.5400, 4.0100, 1.5700, 1.6576],
         [28.1106, -3.2869, -1.3033, 1.4700, 2.2300, 1.4800, 4.8476],
         [23.4630, -25.2382, -1.7361, 1.5600, 3.4800, 1.4000, 4.9676],
         [29.9235, -12.3342, -1.6218, 1.7400, 3.7700, 1.4800, 0.4876]])
    points, rot_mat_T = boxes.rotate(0.13603681398218053, points)
    expected_points = torch.tensor([[-1.1526, 0.8403, -1.4658],
                                    [-4.6181, 1.5187, -1.3857],
                                    [-6.6775, 0.6600, -0.9697],
                                    [-0.5724, 0.6355, -0.5265],
                                    [-4.6173, 0.0912, -1.4741]])
    expected_rot_mat_T = torch.tensor([[0.9908, -0.1356, 0.0000],
                                       [0.1356, 0.9908, 0.0000],
                                       [0.0000, 0.0000, 1.0000]])
    assert torch.allclose(boxes.tensor, expected_tensor, 1e-3)
    assert torch.allclose(points, expected_points, 1e-3)
    assert torch.allclose(rot_mat_T, expected_rot_mat_T, 1e-3)

    # with input torch.Tensor points and rotation matrix
    points, rot_mat_T = boxes.rotate(-0.13603681398218053, points)  # back
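    # rot_mat is the z-axis rotation matrix for the same angle, so rotating
    # with it should reproduce the tensors checked above.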
    rot_mat = np.array([[0.99076125, -0.13561762, 0.],
                        [0.13561762, 0.99076125, 0.], [0., 0., 1.]])
    points, rot_mat_T = boxes.rotate(rot_mat, points)
    assert torch.allclose(boxes.tensor, expected_tensor, 1e-3)
    assert torch.allclose(points, expected_points, 1e-3)
    assert torch.allclose(rot_mat_T, expected_rot_mat_T, 1e-3)

    # with input np.ndarray points and angle
    points_np = np.array([[-1.0280, 0.9888, -1.4658],
                          [-4.3695, 2.1310, -1.3857],
                          [-6.5263, 1.5595, -0.9697],
                          [-0.4809, 0.7073, -0.5265],
                          [-4.5623, 0.7166, -1.4741]])
    points_np, rot_mat_T_np = boxes.rotate(0.13603681398218053, points_np)
    expected_points_np = np.array([[-0.8844, 1.1191, -1.4658],
                                   [-4.0401, 2.7039, -1.3857],
                                   [-6.2545, 2.4302, -0.9697],
                                   [-0.3805, 0.7660, -0.5265],
                                   [-4.4230, 1.3287, -1.4741]])
    expected_rot_mat_T_np = np.array([[0.9908, -0.1356, 0.0000],
                                      [0.1356, 0.9908, 0.0000],
                                      [0.0000, 0.0000, 1.0000]])

    assert np.allclose(points_np, expected_points_np, 1e-3)
    assert np.allclose(rot_mat_T_np, expected_rot_mat_T_np, 1e-3)

    # with input LiDARPoints and rotation matrix
    points_np, rot_mat_T_np = boxes.rotate(-0.13603681398218053, points_np)
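    # rotate() also accepts BasePoints subclasses such as LiDARPoints and
    # returns the rotated points in the same wrapper type.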
    lidar_points = LiDARPoints(points_np)
    lidar_points, rot_mat_T_np = boxes.rotate(rot_mat, lidar_points)
    points_np = lidar_points.tensor.numpy()

    assert np.allclose(points_np, expected_points_np, 1e-3)
    assert np.allclose(rot_mat_T_np, expected_rot_mat_T_np, 1e-3)

    # test box scaling
    expected_tensor = torch.tensor([[
        1.0443488, -2.9183323, -1.7599131, 1.7597977, 3.4089797, 1.6592377,
        1.9336663
    ],
                                    [
                                        8.014273, -4.8007393, -1.6448704,
                                        1.5486219, 4.0324507, 1.57879,
                                        1.7936664
                                    ],
                                    [
                                        27.558605, -7.1084175, -1.310622,
                                        1.4782301, 2.242485, 1.488286,
                                        4.9836664
                                    ],
                                    [
                                        19.934517, -28.344835, -1.7457767,
                                        1.5687338, 3.4994833, 1.4078381,
                                        5.1036663
                                    ],
                                    [
                                        28.130915, -16.369587, -1.6308585,
                                        1.7497417, 3.791107, 1.488286,
                                        0.6236664
                                    ]])
    boxes.scale(1.00559866335275)
    assert torch.allclose(boxes.tensor, expected_tensor)

    # test box translation
    expected_tensor = torch.tensor([[
        1.1281544, -3.0507944, -1.9169292, 1.7597977, 3.4089797, 1.6592377,
        1.9336663
    ],
                                    [
                                        8.098079, -4.9332013, -1.8018866,
                                        1.5486219, 4.0324507, 1.57879,
                                        1.7936664
                                    ],
                                    [
                                        27.64241, -7.2408795, -1.4676381,
                                        1.4782301, 2.242485, 1.488286,
                                        4.9836664
                                    ],
                                    [
                                        20.018322, -28.477297, -1.9027928,
                                        1.5687338, 3.4994833, 1.4078381,
                                        5.1036663
                                    ],
                                    [
                                        28.21472, -16.502048, -1.7878747,
                                        1.7497417, 3.791107, 1.488286,
                                        0.6236664
                                    ]])
    boxes.translate([0.0838056, -0.13246193, -0.15701613])
    assert torch.allclose(boxes.tensor, expected_tensor)

    # test bbox in_range_bev
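    # the BEV range is given as [x_min, y_min, x_max, y_max]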
    expected_tensor = torch.tensor([1, 1, 1, 1, 1], dtype=torch.bool)
    mask = boxes.in_range_bev([0., -40., 70.4, 40.])
    assert (mask == expected_tensor).all()
    mask = boxes.nonempty()
    assert (mask == expected_tensor).all()

    # test bbox in_range
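    # the 3D range is given as [x_min, y_min, z_min, x_max, y_max, z_max]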
    expected_tensor = torch.tensor([1, 1, 0, 0, 0], dtype=torch.bool)
    mask = boxes.in_range_3d([0, -20, -2, 22, 2, 5])
    assert (mask == expected_tensor).all()

    # test bbox indexing
    index_boxes = boxes[2:5]
    expected_tensor = torch.tensor([[
        27.64241, -7.2408795, -1.4676381, 1.4782301, 2.242485, 1.488286,
        4.9836664
    ],
                                    [
                                        20.018322, -28.477297, -1.9027928,
                                        1.5687338, 3.4994833, 1.4078381,
                                        5.1036663
                                    ],
                                    [
                                        28.21472, -16.502048, -1.7878747,
                                        1.7497417, 3.791107, 1.488286,
                                        0.6236664
                                    ]])
    assert len(index_boxes) == 3
    assert torch.allclose(index_boxes.tensor, expected_tensor)

    index_boxes = boxes[2]
    expected_tensor = torch.tensor([[
        27.64241, -7.2408795, -1.4676381, 1.4782301, 2.242485, 1.488286,
        4.9836664
    ]])
    assert len(index_boxes) == 1
    assert torch.allclose(index_boxes.tensor, expected_tensor)

    index_boxes = boxes[[2, 4]]
    expected_tensor = torch.tensor([[
        27.64241, -7.2408795, -1.4676381, 1.4782301, 2.242485, 1.488286,
        4.9836664
    ],
                                    [
                                        28.21472, -16.502048, -1.7878747,
                                        1.7497417, 3.791107, 1.488286,
                                        0.6236664
                                    ]])
    assert len(index_boxes) == 2
    assert torch.allclose(index_boxes.tensor, expected_tensor)

    # test iteration
    for i, box in enumerate(index_boxes):
        assert torch.allclose(box, expected_tensor[i])

    # test properties
    assert torch.allclose(boxes.bottom_center, boxes.tensor[:, :3])
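    # the gravity center sits half a box height above the bottom center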
    expected_tensor = (
        boxes.tensor[:, :3] - boxes.tensor[:, 3:6] *
        (torch.tensor([0.5, 0.5, 0]) - torch.tensor([0.5, 0.5, 0.5])))
    assert torch.allclose(boxes.gravity_center, expected_tensor)

    boxes.limit_yaw()
    assert (boxes.tensor[:, 6] <= np.pi / 2).all()
    assert (boxes.tensor[:, 6] >= -np.pi / 2).all()

    Box3DMode.convert(boxes, Box3DMode.LIDAR, Box3DMode.LIDAR)
    expected_tensor = boxes.tensor.clone()
    assert torch.allclose(expected_tensor, boxes.tensor)

    boxes.flip()
    boxes.flip()
    boxes.limit_yaw()
    assert torch.allclose(expected_tensor, boxes.tensor)

    # test nearest_bev
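    # nearest_bev returns axis-aligned (x1, y1, x2, y2) BEV boxes with each
    # yaw rounded to the nearest multiple of pi / 2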
    expected_tensor = torch.tensor([[-0.5763, -3.9307, 2.8326, -2.1709],
                                    [6.0819, -5.7075, 10.1143, -4.1589],
                                    [26.5212, -7.9800, 28.7637, -6.5018],
                                    [18.2686, -29.2617, 21.7681, -27.6929],
                                    [27.3398, -18.3976, 29.0896, -14.6065]])
    # the pytorch print loses some precision
    assert torch.allclose(
        boxes.nearest_bev, expected_tensor, rtol=1e-4, atol=1e-7)

    # obtained by the print of the original implementation
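    # corners has shape (N, 8, 3) and follows the fixed corner ordering
    # documented on the box class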
    expected_tensor = torch.tensor([[[2.4093e+00, -4.4784e+00, -1.9169e+00],
                                     [2.4093e+00, -4.4784e+00, -2.5769e-01],
                                     [-7.7767e-01, -3.2684e+00, -2.5769e-01],
                                     [-7.7767e-01, -3.2684e+00, -1.9169e+00],
                                     [3.0340e+00, -2.8332e+00, -1.9169e+00],
                                     [3.0340e+00, -2.8332e+00, -2.5769e-01],
                                     [-1.5301e-01, -1.6232e+00, -2.5769e-01],
                                     [-1.5301e-01, -1.6232e+00, -1.9169e+00]],
                                    [[9.8933e+00, -6.1340e+00, -1.8019e+00],
                                     [9.8933e+00, -6.1340e+00, -2.2310e-01],
                                     [5.9606e+00, -5.2427e+00, -2.2310e-01],
                                     [5.9606e+00, -5.2427e+00, -1.8019e+00],
                                     [1.0236e+01, -4.6237e+00, -1.8019e+00],
                                     [1.0236e+01, -4.6237e+00, -2.2310e-01],
                                     [6.3029e+00, -3.7324e+00, -2.2310e-01],
                                     [6.3029e+00, -3.7324e+00, -1.8019e+00]],
                                    [[2.8525e+01, -8.2534e+00, -1.4676e+00],
                                     [2.8525e+01, -8.2534e+00, 2.0648e-02],
                                     [2.6364e+01, -7.6525e+00, 2.0648e-02],
                                     [2.6364e+01, -7.6525e+00, -1.4676e+00],
                                     [2.8921e+01, -6.8292e+00, -1.4676e+00],
                                     [2.8921e+01, -6.8292e+00, 2.0648e-02],
                                     [2.6760e+01, -6.2283e+00, 2.0648e-02],
                                     [2.6760e+01, -6.2283e+00, -1.4676e+00]],
                                    [[2.1337e+01, -2.9870e+01, -1.9028e+00],
                                     [2.1337e+01, -2.9870e+01, -4.9495e-01],
                                     [1.8102e+01, -2.8535e+01, -4.9495e-01],
                                     [1.8102e+01, -2.8535e+01, -1.9028e+00],
                                     [2.1935e+01, -2.8420e+01, -1.9028e+00],
                                     [2.1935e+01, -2.8420e+01, -4.9495e-01],
                                     [1.8700e+01, -2.7085e+01, -4.9495e-01],
                                     [1.8700e+01, -2.7085e+01, -1.9028e+00]],
                                    [[2.6398e+01, -1.7530e+01, -1.7879e+00],
                                     [2.6398e+01, -1.7530e+01, -2.9959e-01],
                                     [2.8612e+01, -1.4452e+01, -2.9959e-01],
                                     [2.8612e+01, -1.4452e+01, -1.7879e+00],
                                     [2.7818e+01, -1.8552e+01, -1.7879e+00],
                                     [2.7818e+01, -1.8552e+01, -2.9959e-01],
                                     [3.0032e+01, -1.5474e+01, -2.9959e-01],
                                     [3.0032e+01, -1.5474e+01, -1.7879e+00]]])
    # the pytorch print loses some precision
    assert torch.allclose(boxes.corners, expected_tensor, rtol=1e-4, atol=1e-7)

    # test new_box
    new_box1 = boxes.new_box([[1, 2, 3, 4, 5, 6, 7]])
    assert torch.allclose(
        new_box1.tensor,
        torch.tensor([[1, 2, 3, 4, 5, 6, 7]], dtype=boxes.tensor.dtype))
    assert new_box1.device == boxes.device
    assert new_box1.with_yaw == boxes.with_yaw
    assert new_box1.box_dim == boxes.box_dim

    new_box2 = boxes.new_box(np.array([[1, 2, 3, 4, 5, 6, 7]]))
    assert torch.allclose(
        new_box2.tensor,
        torch.tensor([[1, 2, 3, 4, 5, 6, 7]], dtype=boxes.tensor.dtype))

    new_box3 = boxes.new_box(torch.tensor([[1, 2, 3, 4, 5, 6, 7]]))
    assert torch.allclose(
        new_box3.tensor,
        torch.tensor([[1, 2, 3, 4, 5, 6, 7]], dtype=boxes.tensor.dtype))


def test_boxes_conversion():
    """Test the conversion of boxes between different modes.

    CommandLine:
        xdoctest tests/test_box3d.py::test_boxes_conversion zero
    """
    lidar_boxes = LiDARInstance3DBoxes(
        [[1.7802081, 2.516249, -1.7501148, 1.75, 3.39, 1.65, 1.48],
         [8.959413, 2.4567227, -1.6357126, 1.54, 4.01, 1.57, 1.62],
         [28.2967, -0.5557558, -1.303325, 1.47, 2.23, 1.48, -1.57],
         [26.66902, 21.82302, -1.736057, 1.56, 3.48, 1.4, -1.69],
         [31.31978, 8.162144, -1.6217787, 1.74, 3.77, 1.48, 2.79]])
    cam_box_tensor = Box3DMode.convert(lidar_boxes.tensor, Box3DMode.LIDAR,
                                       Box3DMode.CAM)
    expected_box = lidar_boxes.convert_to(Box3DMode.CAM)
    assert torch.equal(expected_box.tensor, cam_box_tensor)

    # Some properties should be the same
    cam_boxes = CameraInstance3DBoxes(cam_box_tensor)
    assert torch.equal(cam_boxes.height, lidar_boxes.height)
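    # the camera y-axis points downward, so height coordinates change sign
    # relative to the LiDAR frame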
    assert torch.equal(cam_boxes.top_height, -lidar_boxes.top_height)
    assert torch.equal(cam_boxes.bottom_height, -lidar_boxes.bottom_height)
    assert torch.allclose(cam_boxes.volume, lidar_boxes.volume)

    lidar_box_tensor = Box3DMode.convert(cam_box_tensor, Box3DMode.CAM,
                                         Box3DMode.LIDAR)
    expected_tensor = torch.tensor(
        [[1.7802081, 2.516249, -1.7501148, 1.75, 3.39, 1.65, 1.48],
         [8.959413, 2.4567227, -1.6357126, 1.54, 4.01, 1.57, 1.62],
         [28.2967, -0.5557558, -1.303325, 1.47, 2.23, 1.48, -1.57],
         [26.66902, 21.82302, -1.736057, 1.56, 3.48, 1.4, -1.69],
         [31.31978, 8.162144, -1.6217787, 1.74, 3.77, 1.48, 2.79]])

    assert torch.allclose(expected_tensor, lidar_box_tensor)
    assert torch.allclose(lidar_boxes.tensor, lidar_box_tensor)

    depth_box_tensor = Box3DMode.convert(cam_box_tensor, Box3DMode.CAM,
                                         Box3DMode.DEPTH)
    depth_to_cam_box_tensor = Box3DMode.convert(depth_box_tensor,
                                                Box3DMode.DEPTH, Box3DMode.CAM)
    assert torch.allclose(cam_box_tensor, depth_to_cam_box_tensor)

    # test similar mode conversion
    same_results = Box3DMode.convert(depth_box_tensor, Box3DMode.DEPTH,
                                     Box3DMode.DEPTH)
    assert torch.equal(same_results, depth_box_tensor)

    # test conversion with a given rt_mat
    camera_boxes = CameraInstance3DBoxes(
        [[0.06, 1.77, 21.4, 3.2, 1.61, 1.66, -1.54],
         [6.59, 1.53, 6.76, 12.78, 3.66, 2.28, 1.55],
         [6.71, 1.59, 22.18, 14.73, 3.64, 2.32, 1.59],
         [7.11, 1.58, 34.54, 10.04, 3.61, 2.32, 1.61],
         [7.78, 1.65, 45.95, 12.83, 3.63, 2.34, 1.64]])

    rect = torch.tensor(
        [[0.9999239, 0.00983776, -0.00744505, 0.],
         [-0.0098698, 0.9999421, -0.00427846, 0.],
         [0.00740253, 0.00435161, 0.9999631, 0.], [0., 0., 0., 1.]],
        dtype=torch.float32)

    Trv2c = torch.tensor(
        [[7.533745e-03, -9.999714e-01, -6.166020e-04, -4.069766e-03],
         [1.480249e-02, 7.280733e-04, -9.998902e-01, -7.631618e-02],
         [9.998621e-01, 7.523790e-03, 1.480755e-02, -2.717806e-01],
         [0.000000e+00, 0.000000e+00, 0.000000e+00, 1.000000e+00]],
        dtype=torch.float32)

    expected_tensor = torch.tensor(
        [[
            2.16902434e+01, -4.06038554e-02, -1.61906639e+00, 1.65999997e+00,
            3.20000005e+00, 1.61000001e+00, -1.53999996e+00
        ],
         [
             7.05006905e+00, -6.57459601e+00, -1.60107949e+00, 2.27999997e+00,
             1.27799997e+01, 3.66000009e+00, 1.54999995e+00
         ],
         [
             2.24698818e+01, -6.69203759e+00, -1.50118145e+00, 2.31999993e+00,
             1.47299995e+01, 3.64000010e+00, 1.59000003e+00
         ],
         [
             3.48291965e+01, -7.09058388e+00, -1.36622983e+00, 2.31999993e+00,
             1.00400000e+01, 3.60999990e+00, 1.61000001e+00
         ],
         [
             4.62394617e+01, -7.75838800e+00, -1.32405020e+00, 2.33999991e+00,
             1.28299999e+01, 3.63000011e+00, 1.63999999e+00
         ]],
        dtype=torch.float32)

    rt_mat = rect @ Trv2c
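    # Following the KITTI convention, rect @ Trv2c maps points from the
    # velodyne (LiDAR) frame into the rectified camera frame.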
    # test conversion with Box type
    cam_to_lidar_box = Box3DMode.convert(camera_boxes, Box3DMode.CAM,
                                         Box3DMode.LIDAR, rt_mat.inverse())
    assert torch.allclose(cam_to_lidar_box.tensor, expected_tensor)

    lidar_to_cam_box = Box3DMode.convert(cam_to_lidar_box.tensor,
                                         Box3DMode.LIDAR, Box3DMode.CAM,
                                         rt_mat)
    assert torch.allclose(lidar_to_cam_box, camera_boxes.tensor)

    # test numpy convert
    cam_to_lidar_box = Box3DMode.convert(camera_boxes.tensor.numpy(),
                                         Box3DMode.CAM, Box3DMode.LIDAR,
                                         rt_mat.inverse().numpy())
    assert np.allclose(cam_to_lidar_box, expected_tensor.numpy())

    # test list convert
    cam_to_lidar_box = Box3DMode.convert(
        camera_boxes.tensor[0].numpy().tolist(), Box3DMode.CAM,
        Box3DMode.LIDAR,
        rt_mat.inverse().numpy())
    assert np.allclose(np.array(cam_to_lidar_box), expected_tensor[0].numpy())

    # test convert from depth to lidar
    depth_boxes = torch.tensor(
        [[2.4593, 2.5870, -0.4321, 0.8597, 0.6193, 1.0204, 3.0693],
         [1.4856, 2.5299, -0.5570, 0.9385, 2.1404, 0.8954, 3.0601]],
        dtype=torch.float32)
    depth_boxes = DepthInstance3DBoxes(depth_boxes)
    depth_to_lidar_box = depth_boxes.convert_to(Box3DMode.LIDAR)
    expected_box = depth_to_lidar_box.convert_to(Box3DMode.DEPTH)
    assert torch.equal(depth_boxes.tensor, expected_box.tensor)

    lidar_to_depth_box = Box3DMode.convert(depth_to_lidar_box, Box3DMode.LIDAR,
                                           Box3DMode.DEPTH)
    assert torch.allclose(depth_boxes.tensor, lidar_to_depth_box.tensor)
    assert torch.allclose(depth_boxes.volume, lidar_to_depth_box.volume)

    # test convert from depth to camera
    depth_to_cam_box = Box3DMode.convert(depth_boxes, Box3DMode.DEPTH,
                                         Box3DMode.CAM)
    cam_to_depth_box = Box3DMode.convert(depth_to_cam_box, Box3DMode.CAM,
                                         Box3DMode.DEPTH)
    expected_tensor = depth_to_cam_box.convert_to(Box3DMode.DEPTH)
    assert torch.equal(expected_tensor.tensor, cam_to_depth_box.tensor)
    assert torch.allclose(depth_boxes.tensor, cam_to_depth_box.tensor)
    assert torch.allclose(depth_boxes.volume, cam_to_depth_box.volume)

    with pytest.raises(NotImplementedError):
        # assert invalid convert mode
        Box3DMode.convert(depth_boxes, Box3DMode.DEPTH, 3)


def test_camera_boxes3d():
    # Test init with numpy array
    np_boxes = np.array(
        [[1.7802081, 2.516249, -1.7501148, 1.75, 3.39, 1.65, 1.48],
         [8.959413, 2.4567227, -1.6357126, 1.54, 4.01, 1.57, 1.62]],
        dtype=np.float32)

    boxes_1 = Box3DMode.convert(
        LiDARInstance3DBoxes(np_boxes), Box3DMode.LIDAR, Box3DMode.CAM)
    assert isinstance(boxes_1, CameraInstance3DBoxes)

    cam_np_boxes = Box3DMode.convert(np_boxes, Box3DMode.LIDAR, Box3DMode.CAM)
    assert torch.allclose(boxes_1.tensor,
                          boxes_1.tensor.new_tensor(cam_np_boxes))

    # test init with torch.Tensor
    th_boxes = torch.tensor(
        [[
            28.29669987, -0.5557558, -1.30332506, 1.47000003, 2.23000002,
            1.48000002, -1.57000005
        ],
         [
             26.66901946, 21.82302134, -1.73605708, 1.55999994, 3.48000002,
             1.39999998, -1.69000006
         ],
         [
             31.31977974, 8.16214412, -1.62177875, 1.74000001, 3.76999998,
             1.48000002, 2.78999996
         ]],
        dtype=torch.float32)
    cam_th_boxes = Box3DMode.convert(th_boxes, Box3DMode.LIDAR, Box3DMode.CAM)
    boxes_2 = CameraInstance3DBoxes(cam_th_boxes)
    assert torch.allclose(boxes_2.tensor, cam_th_boxes)

    # test clone/to/device
    boxes_2 = boxes_2.clone()
    boxes_1 = boxes_1.to(boxes_2.device)

    # test box concatenation
    expected_tensor = Box3DMode.convert(
        torch.tensor(
            [[1.7802081, 2.516249, -1.7501148, 1.75, 3.39, 1.65, 1.48],
             [8.959413, 2.4567227, -1.6357126, 1.54, 4.01, 1.57, 1.62],
             [28.2967, -0.5557558, -1.303325, 1.47, 2.23, 1.48, -1.57],
             [26.66902, 21.82302, -1.736057, 1.56, 3.48, 1.4, -1.69],
             [31.31978, 8.162144, -1.6217787, 1.74, 3.77, 1.48, 2.79]]),
        Box3DMode.LIDAR, Box3DMode.CAM)
    boxes = CameraInstance3DBoxes.cat([boxes_1, boxes_2])
    assert torch.allclose(boxes.tensor, expected_tensor)

    # test box flip
    points = torch.tensor([[0.6762, 1.4658, 1.2559], [0.8784, 1.3857, 4.7814],
                           [-0.2517, 0.9697, 6.7053], [0.5520, 0.5265, 0.6533],
                           [-0.5358, 1.4741, 4.5870]])
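    # A 'horizontal' flip in camera coordinates mirrors the x-axis
    # (image left-right): x is negated and yaw is adjusted accordingly.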
    expected_tensor = Box3DMode.convert(
        torch.tensor(
            [[1.7802081, -2.516249, -1.7501148, 1.75, 3.39, 1.65, 1.6615927],
             [8.959413, -2.4567227, -1.6357126, 1.54, 4.01, 1.57, 1.5215927],
             [28.2967, 0.5557558, -1.303325, 1.47, 2.23, 1.48, 4.7115927],
             [26.66902, -21.82302, -1.736057, 1.56, 3.48, 1.4, 4.8315926],
             [31.31978, -8.162144, -1.6217787, 1.74, 3.77, 1.48, 0.35159278]]),
        Box3DMode.LIDAR, Box3DMode.CAM)
    points = boxes.flip('horizontal', points)
    expected_points = torch.tensor([[-0.6762, 1.4658, 1.2559],
                                    [-0.8784, 1.3857, 4.7814],
                                    [0.2517, 0.9697, 6.7053],
                                    [-0.5520, 0.5265, 0.6533],
                                    [0.5358, 1.4741, 4.5870]])
    assert torch.allclose(boxes.tensor, expected_tensor)
    assert torch.allclose(points, expected_points, 1e-3)

    expected_tensor = torch.tensor(
        [[2.5162, 1.7501, -1.7802, 3.3900, 1.6500, 1.7500, -1.6616],
         [2.4567, 1.6357, -8.9594, 4.0100, 1.5700, 1.5400, -1.5216],
         [-0.5558, 1.3033, -28.2967, 2.2300, 1.4800, 1.4700, -4.7116],
         [21.8230, 1.7361, -26.6690, 3.4800, 1.4000, 1.5600, -4.8316],
         [8.1621, 1.6218, -31.3198, 3.7700, 1.4800, 1.7400, -0.3516]])
    boxes_flip_vert = boxes.clone()
    points = boxes_flip_vert.flip('vertical', points)
    expected_points = torch.tensor([[-0.6762, 1.4658, -1.2559],
                                    [-0.8784, 1.3857, -4.7814],
                                    [0.2517, 0.9697, -6.7053],
                                    [-0.5520, 0.5265, -0.6533],
                                    [0.5358, 1.4741, -4.5870]])
    assert torch.allclose(boxes_flip_vert.tensor, expected_tensor, 1e-4)
    assert torch.allclose(points, expected_points)

    # test box rotation
    # with input torch.Tensor points and angle
    expected_tensor = Box3DMode.convert(
        torch.tensor(
            [[1.4225, -2.7344, -1.7501, 1.7500, 3.3900, 1.6500, 1.7976],
             [8.5435, -3.6491, -1.6357, 1.5400, 4.0100, 1.5700, 1.6576],
             [28.1106, -3.2869, -1.3033, 1.4700, 2.2300, 1.4800, 4.8476],
             [23.4630, -25.2382, -1.7361, 1.5600, 3.4800, 1.4000, 4.9676],
             [29.9235, -12.3342, -1.6218, 1.7400, 3.7700, 1.4800, 0.4876]]),
        Box3DMode.LIDAR, Box3DMode.CAM)
    points, rot_mat_T = boxes.rotate(torch.tensor(0.13603681398218053), points)
    expected_points = torch.tensor([[-0.8403, 1.4658, -1.1526],
                                    [-1.5187, 1.3857, -4.6181],
                                    [-0.6600, 0.9697, -6.6775],
                                    [-0.6355, 0.5265, -0.5724],
                                    [-0.0912, 1.4741, -4.6173]])
    expected_rot_mat_T = torch.tensor([[0.9908, 0.0000, -0.1356],
                                       [0.0000, 1.0000, 0.0000],
                                       [0.1356, 0.0000, 0.9908]])
    assert torch.allclose(boxes.tensor, expected_tensor, 1e-3)
    assert torch.allclose(points, expected_points, 1e-3)
    assert torch.allclose(rot_mat_T, expected_rot_mat_T, 1e-3)

    # with input torch.Tensor points and rotation matrix
    points, rot_mat_T = boxes.rotate(
        torch.tensor(-0.13603681398218053), points)  # back
    rot_mat = np.array([[0.99076125, 0., -0.13561762], [0., 1., 0.],
                        [0.13561762, 0., 0.99076125]])
    points, rot_mat_T = boxes.rotate(rot_mat, points)
    assert torch.allclose(boxes.tensor, expected_tensor, 1e-3)
    assert torch.allclose(points, expected_points, 1e-3)
    assert torch.allclose(rot_mat_T, expected_rot_mat_T, 1e-3)

    # with input np.ndarray points and angle
    points_np = np.array([[0.6762, 1.2559, -1.4658, 2.5359],
                          [0.8784, 4.7814, -1.3857, 0.7167],
                          [-0.2517, 6.7053, -0.9697, 0.5599],
                          [0.5520, 0.6533, -0.5265, 1.0032],
                          [-0.5358, 4.5870, -1.4741, 0.0556]])
    points_np, rot_mat_T_np = boxes.rotate(
        torch.tensor(0.13603681398218053), points_np)
    expected_points_np = np.array([[0.4712, 1.2559, -1.5440, 2.5359],
                                   [0.6824, 4.7814, -1.4920, 0.7167],
                                   [-0.3809, 6.7053, -0.9266, 0.5599],
                                   [0.4755, 0.6533, -0.5965, 1.0032],
                                   [-0.7308, 4.5870, -1.3878, 0.0556]])
    expected_rot_mat_T_np = np.array([[0.9908, 0.0000, -0.1356],
                                      [0.0000, 1.0000, 0.0000],
                                      [0.1356, 0.0000, 0.9908]])

    assert np.allclose(points_np, expected_points_np, 1e-3)
    assert np.allclose(rot_mat_T_np, expected_rot_mat_T_np, 1e-3)

    # with input CameraPoints and rotation matrix
    points_np, rot_mat_T_np = boxes.rotate(
        torch.tensor(-0.13603681398218053), points_np)
    camera_points = CameraPoints(points_np, points_dim=4)
    camera_points, rot_mat_T_np = boxes.rotate(rot_mat, camera_points)
    points_np = camera_points.tensor.numpy()
    assert np.allclose(points_np, expected_points_np, 1e-3)
    assert np.allclose(rot_mat_T_np, expected_rot_mat_T_np, 1e-3)

    # test box scaling
    expected_tensor = Box3DMode.convert(
        torch.tensor([[
            1.0443488, -2.9183323, -1.7599131, 1.7597977, 3.4089797, 1.6592377,
            1.9336663
        ],
                      [
                          8.014273, -4.8007393, -1.6448704, 1.5486219,
                          4.0324507, 1.57879, 1.7936664
                      ],
                      [
                          27.558605, -7.1084175, -1.310622, 1.4782301,
                          2.242485, 1.488286, 4.9836664
                      ],
                      [
                          19.934517, -28.344835, -1.7457767, 1.5687338,
                          3.4994833, 1.4078381, 5.1036663
                      ],
                      [
                          28.130915, -16.369587, -1.6308585, 1.7497417,
                          3.791107, 1.488286, 0.6236664
                      ]]), Box3DMode.LIDAR, Box3DMode.CAM)
    boxes.scale(1.00559866335275)
    assert torch.allclose(boxes.tensor, expected_tensor)

    # test box translation
    expected_tensor = Box3DMode.convert(
        torch.tensor([[
            1.1281544, -3.0507944, -1.9169292, 1.7597977, 3.4089797, 1.6592377,
            1.9336663
        ],
                      [
                          8.098079, -4.9332013, -1.8018866, 1.5486219,
                          4.0324507, 1.57879, 1.7936664
                      ],
                      [
                          27.64241, -7.2408795, -1.4676381, 1.4782301,
                          2.242485, 1.488286, 4.9836664
                      ],
                      [
                          20.018322, -28.477297, -1.9027928, 1.5687338,
                          3.4994833, 1.4078381, 5.1036663
                      ],
                      [
                          28.21472, -16.502048, -1.7878747, 1.7497417,
                          3.791107, 1.488286, 0.6236664
                      ]]), Box3DMode.LIDAR, Box3DMode.CAM)
    boxes.translate(torch.tensor([0.13246193, 0.15701613, 0.0838056]))
    assert torch.allclose(boxes.tensor, expected_tensor)

    # test bbox in_range_bev
    expected_tensor = torch.tensor([1, 1, 1, 1, 1], dtype=torch.bool)
    mask = boxes.in_range_bev([0., -40., 70.4, 40.])
    assert (mask == expected_tensor).all()
    mask = boxes.nonempty()
    assert (mask == expected_tensor).all()

    # test bbox in_range
    expected_tensor = torch.tensor([1, 1, 0, 0, 0], dtype=torch.bool)
    mask = boxes.in_range_3d([-2, -5, 0, 20, 2, 22])
    assert (mask == expected_tensor).all()

    # test properties
    assert torch.allclose(boxes.bottom_center, boxes.tensor[:, :3])
    expected_tensor = (
        boxes.tensor[:, :3] - boxes.tensor[:, 3:6] *
        (torch.tensor([0.5, 1.0, 0.5]) - torch.tensor([0.5, 0.5, 0.5])))
    assert torch.allclose(boxes.gravity_center, expected_tensor)

    boxes.limit_yaw()
    assert (boxes.tensor[:, 6] <= np.pi / 2).all()
    assert (boxes.tensor[:, 6] >= -np.pi / 2).all()

    Box3DMode.convert(boxes, Box3DMode.CAM, Box3DMode.CAM)
    expected_tensor = boxes.tensor.clone()
    assert torch.allclose(expected_tensor, boxes.tensor)

    boxes.flip()
    boxes.flip()
    boxes.limit_yaw()
    assert torch.allclose(expected_tensor, boxes.tensor)

    # test nearest_bev
    # BEV box in lidar coordinates (x, y)
    lidar_expected_tensor = torch.tensor(
        [[-0.5763, -3.9307, 2.8326, -2.1709],
         [6.0819, -5.7075, 10.1143, -4.1589],
         [26.5212, -7.9800, 28.7637, -6.5018],
         [18.2686, -29.2617, 21.7681, -27.6929],
         [27.3398, -18.3976, 29.0896, -14.6065]])
    # BEV box in camera coordinates (-y, x)
    expected_tensor = lidar_expected_tensor.clone()
    expected_tensor[:, 0::2] = -lidar_expected_tensor[:, [3, 1]]
    expected_tensor[:, 1::2] = lidar_expected_tensor[:, 0::2]
    # the pytorch print loses some precision
    assert torch.allclose(
        boxes.nearest_bev, expected_tensor, rtol=1e-4, atol=1e-7)

    # obtained by the print of the original implementation
    expected_tensor = torch.tensor([[[3.2684e+00, 2.5769e-01, -7.7767e-01],
                                     [1.6232e+00, 2.5769e-01, -1.5301e-01],
                                     [1.6232e+00, 1.9169e+00, -1.5301e-01],
                                     [3.2684e+00, 1.9169e+00, -7.7767e-01],
                                     [4.4784e+00, 2.5769e-01, 2.4093e+00],
                                     [2.8332e+00, 2.5769e-01, 3.0340e+00],
                                     [2.8332e+00, 1.9169e+00, 3.0340e+00],
                                     [4.4784e+00, 1.9169e+00, 2.4093e+00]],
                                    [[5.2427e+00, 2.2310e-01, 5.9606e+00],
                                     [3.7324e+00, 2.2310e-01, 6.3029e+00],
                                     [3.7324e+00, 1.8019e+00, 6.3029e+00],
                                     [5.2427e+00, 1.8019e+00, 5.9606e+00],
                                     [6.1340e+00, 2.2310e-01, 9.8933e+00],
                                     [4.6237e+00, 2.2310e-01, 1.0236e+01],
                                     [4.6237e+00, 1.8019e+00, 1.0236e+01],
                                     [6.1340e+00, 1.8019e+00, 9.8933e+00]],
                                    [[7.6525e+00, -2.0648e-02, 2.6364e+01],
                                     [6.2283e+00, -2.0648e-02, 2.6760e+01],
                                     [6.2283e+00, 1.4676e+00, 2.6760e+01],
                                     [7.6525e+00, 1.4676e+00, 2.6364e+01],
                                     [8.2534e+00, -2.0648e-02, 2.8525e+01],
                                     [6.8292e+00, -2.0648e-02, 2.8921e+01],
                                     [6.8292e+00, 1.4676e+00, 2.8921e+01],
                                     [8.2534e+00, 1.4676e+00, 2.8525e+01]],
                                    [[2.8535e+01, 4.9495e-01, 1.8102e+01],
                                     [2.7085e+01, 4.9495e-01, 1.8700e+01],
                                     [2.7085e+01, 1.9028e+00, 1.8700e+01],
                                     [2.8535e+01, 1.9028e+00, 1.8102e+01],
                                     [2.9870e+01, 4.9495e-01, 2.1337e+01],
                                     [2.8420e+01, 4.9495e-01, 2.1935e+01],
                                     [2.8420e+01, 1.9028e+00, 2.1935e+01],
                                     [2.9870e+01, 1.9028e+00, 2.1337e+01]],
                                    [[1.4452e+01, 2.9959e-01, 2.8612e+01],
                                     [1.5474e+01, 2.9959e-01, 3.0032e+01],
                                     [1.5474e+01, 1.7879e+00, 3.0032e+01],
                                     [1.4452e+01, 1.7879e+00, 2.8612e+01],
                                     [1.7530e+01, 2.9959e-01, 2.6398e+01],
                                     [1.8552e+01, 2.9959e-01, 2.7818e+01],
                                     [1.8552e+01, 1.7879e+00, 2.7818e+01],
                                     [1.7530e+01, 1.7879e+00, 2.6398e+01]]])

    # the pytorch print loses some precision
    assert torch.allclose(boxes.corners, expected_tensor, rtol=1e-4, atol=1e-7)

    # test init with a given origin
    boxes_origin_given = CameraInstance3DBoxes(
        th_boxes.clone(), box_dim=7, origin=(0.5, 0.5, 0.5))
    expected_tensor = th_boxes.clone()
    expected_tensor[:, :3] = th_boxes[:, :3] + th_boxes[:, 3:6] * (
        th_boxes.new_tensor((0.5, 1.0, 0.5)) - th_boxes.new_tensor(
            (0.5, 0.5, 0.5)))
    assert torch.allclose(boxes_origin_given.tensor, expected_tensor)


def test_boxes3d_overlaps():
    """Test the iou calculation of boxes in different modes.

    ComandLine:
        xdoctest tests/test_box3d.py::test_boxes3d_overlaps zero
    """
    if not torch.cuda.is_available():
        pytest.skip('test requires GPU and torch+cuda')

    # Test LiDAR boxes 3D overlaps
    boxes1_tensor = torch.tensor(
        [[1.8, -2.5, -1.8, 1.75, 3.39, 1.65, 1.6615927],
         [8.9, -2.5, -1.6, 1.54, 4.01, 1.57, 1.5215927],
         [28.3, 0.5, -1.3, 1.47, 2.23, 1.48, 4.7115927],
         [31.3, -8.2, -1.6, 1.74, 3.77, 1.48, 0.35]],
        device='cuda')
    boxes1 = LiDARInstance3DBoxes(boxes1_tensor)

    boxes2_tensor = torch.tensor([[1.2, -3.0, -1.9, 1.8, 3.4, 1.7, 1.9],
                                  [8.1, -2.9, -1.8, 1.5, 4.1, 1.6, 1.8],
                                  [31.3, -8.2, -1.6, 1.74, 3.77, 1.48, 0.35],
                                  [20.1, -28.5, -1.9, 1.6, 3.5, 1.4, 5.1]],
                                 device='cuda')
    boxes2 = LiDARInstance3DBoxes(boxes2_tensor)

    expected_iou_tensor = torch.tensor(
        [[0.3710, 0.0000, 0.0000, 0.0000], [0.0000, 0.3322, 0.0000, 0.0000],
         [0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 1.0000, 0.0000]],
        device='cuda')
    overlaps_3d_iou = boxes1.overlaps(boxes1, boxes2)
    assert torch.allclose(
        expected_iou_tensor, overlaps_3d_iou, rtol=1e-4, atol=1e-7)

    expected_iof_tensor = torch.tensor(
        [[0.5582, 0.0000, 0.0000, 0.0000], [0.0000, 0.5025, 0.0000, 0.0000],
         [0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 1.0000, 0.0000]],
        device='cuda')
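    # mode='iof' computes intersection over the foreground (first) boxes
    # instead of intersection over union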
    overlaps_3d_iof = boxes1.overlaps(boxes1, boxes2, mode='iof')
    assert torch.allclose(
        expected_iof_tensor, overlaps_3d_iof, rtol=1e-4, atol=1e-7)

    empty_boxes = []
    boxes3 = LiDARInstance3DBoxes(empty_boxes)
    overlaps_3d_empty = boxes1.overlaps(boxes3, boxes2)
    assert overlaps_3d_empty.shape[0] == 0
    assert overlaps_3d_empty.shape[1] == 4
    # Test camera boxes 3D overlaps
    cam_boxes1_tensor = Box3DMode.convert(boxes1_tensor, Box3DMode.LIDAR,
                                          Box3DMode.CAM)
    cam_boxes1 = CameraInstance3DBoxes(cam_boxes1_tensor)

    cam_boxes2_tensor = Box3DMode.convert(boxes2_tensor, Box3DMode.LIDAR,
                                          Box3DMode.CAM)
    cam_boxes2 = CameraInstance3DBoxes(cam_boxes2_tensor)
    cam_overlaps_3d = cam_boxes1.overlaps(cam_boxes1, cam_boxes2)

    # same boxes under different coordinates should have the same iou
    assert torch.allclose(
        expected_iou_tensor, cam_overlaps_3d, rtol=1e-4, atol=1e-7)
    assert torch.allclose(cam_overlaps_3d, overlaps_3d_iou)

    with pytest.raises(AssertionError):
        cam_boxes1.overlaps(cam_boxes1, boxes1)
    with pytest.raises(AssertionError):
        boxes1.overlaps(cam_boxes1, boxes1)


def test_depth_boxes3d():
    # test empty initialization
    empty_boxes = []
    boxes = DepthInstance3DBoxes(empty_boxes)
    assert boxes.tensor.shape[0] == 0
    assert boxes.tensor.shape[1] == 7

    # Test init with numpy array
    np_boxes = np.array(
        [[1.4856, 2.5299, -0.5570, 0.9385, 2.1404, 0.8954, 3.0601],
         [2.3262, 3.3065, 0.44255, 0.8234, 0.5325, 1.0099, 2.9971]],
        dtype=np.float32)
    boxes_1 = DepthInstance3DBoxes(np_boxes)
    assert torch.allclose(boxes_1.tensor, torch.from_numpy(np_boxes))

    # test properties
    assert boxes_1.volume.size(0) == 2
    assert (boxes_1.center == boxes_1.bottom_center).all()
    expected_tensor = torch.tensor([[1.4856, 2.5299, -0.1093],
                                    [2.3262, 3.3065, 0.9475]])
    assert torch.allclose(boxes_1.gravity_center, expected_tensor)
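    # bev is the rotated BEV box (x, y, dx, dy, yaw); nearest_bev is the
    # axis-aligned (x1, y1, x2, y2) version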
    expected_tensor = torch.tensor([[1.4856, 2.5299, 0.9385, 2.1404, 3.0601],
                                    [2.3262, 3.3065, 0.8234, 0.5325, 2.9971]])
    assert torch.allclose(boxes_1.bev, expected_tensor)
    expected_tensor = torch.tensor([[1.0164, 1.4597, 1.9548, 3.6001],
                                    [1.9145, 3.0402, 2.7379, 3.5728]])
    assert torch.allclose(boxes_1.nearest_bev, expected_tensor, 1e-4)
    assert repr(boxes) == (
        'DepthInstance3DBoxes(\n    tensor([], size=(0, 7)))')

    # test init with torch.Tensor
    th_boxes = torch.tensor(
        [[2.4593, 2.5870, -0.4321, 0.8597, 0.6193, 1.0204, 3.0693],
         [1.4856, 2.5299, -0.5570, 0.9385, 2.1404, 0.8954, 3.0601]],
        dtype=torch.float32)
    boxes_2 = DepthInstance3DBoxes(th_boxes)
    assert torch.allclose(boxes_2.tensor, th_boxes)

    # test clone/to/device
    boxes_2 = boxes_2.clone()
    boxes_1 = boxes_1.to(boxes_2.device)

    # test box concatenation
    expected_tensor = torch.tensor(
        [[1.4856, 2.5299, -0.5570, 0.9385, 2.1404, 0.8954, 3.0601],
         [2.3262, 3.3065, 0.44255, 0.8234, 0.5325, 1.0099, 2.9971],
         [2.4593, 2.5870, -0.4321, 0.8597, 0.6193, 1.0204, 3.0693],
         [1.4856, 2.5299, -0.5570, 0.9385, 2.1404, 0.8954, 3.0601]])
    boxes = DepthInstance3DBoxes.cat([boxes_1, boxes_2])
    assert torch.allclose(boxes.tensor, expected_tensor)
    # concatenate empty list
    empty_boxes = DepthInstance3DBoxes.cat([])
    assert empty_boxes.tensor.shape[0] == 0
    assert empty_boxes.tensor.shape[-1] == 7

    # test box flip
    points = torch.tensor([[0.6762, 1.2559, -1.4658, 2.5359],
                           [0.8784, 4.7814, -1.3857, 0.7167],
                           [-0.2517, 6.7053, -0.9697, 0.5599],
                           [0.5520, 0.6533, -0.5265, 1.0032],
                           [-0.5358, 4.5870, -1.4741, 0.0556]])
    expected_tensor = torch.tensor(
        [[-1.4856, 2.5299, -0.5570, 0.9385, 2.1404, 0.8954, 0.0815],
         [-2.3262, 3.3065, 0.4426, 0.8234, 0.5325, 1.0099, 0.1445],
         [-2.4593, 2.5870, -0.4321, 0.8597, 0.6193, 1.0204, 0.0723],
         [-1.4856, 2.5299, -0.5570, 0.9385, 2.1404, 0.8954, 0.0815]])
    points = boxes.flip(bev_direction='horizontal', points=points)
    expected_points = torch.tensor([[-0.6762, 1.2559, -1.4658, 2.5359],
                                    [-0.8784, 4.7814, -1.3857, 0.7167],
                                    [0.2517, 6.7053, -0.9697, 0.5599],
                                    [-0.5520, 0.6533, -0.5265, 1.0032],
                                    [0.5358, 4.5870, -1.4741, 0.0556]])
    assert torch.allclose(boxes.tensor, expected_tensor, 1e-3)
    assert torch.allclose(points, expected_points)

    expected_tensor = torch.tensor(
        [[-1.4856, -2.5299, -0.5570, 0.9385, 2.1404, 0.8954, -0.0815],
         [-2.3262, -3.3065, 0.4426, 0.8234, 0.5325, 1.0099, -0.1445],
         [-2.4593, -2.5870, -0.4321, 0.8597, 0.6193, 1.0204, -0.0723],
         [-1.4856, -2.5299, -0.5570, 0.9385, 2.1404, 0.8954, -0.0815]])
    points = boxes.flip(bev_direction='vertical', points=points)
    expected_points = torch.tensor([[-0.6762, -1.2559, -1.4658, 2.5359],
                                    [-0.8784, -4.7814, -1.3857, 0.7167],
                                    [0.2517, -6.7053, -0.9697, 0.5599],
                                    [-0.5520, -0.6533, -0.5265, 1.0032],
                                    [0.5358, -4.5870, -1.4741, 0.0556]])
    assert torch.allclose(boxes.tensor, expected_tensor, 1e-3)
    assert torch.allclose(points, expected_points)

    # test box rotation
    # with input torch.Tensor points and angle
    boxes_rot = boxes.clone()
    expected_tensor = torch.tensor(
        [[-1.5434, -2.4951, -0.5570, 0.9385, 2.1404, 0.8954, -0.0585],
         [-2.4016, -3.2521, 0.4426, 0.8234, 0.5325, 1.0099, -0.1215],
         [-2.5181, -2.5298, -0.4321, 0.8597, 0.6193, 1.0204, -0.0493],
         [-1.5434, -2.4951, -0.5570, 0.9385, 2.1404, 0.8954, -0.0585]])
    points, rot_mat_T = boxes_rot.rotate(-0.022998953275003075, points)
    expected_points = torch.tensor([[-0.7049, -1.2400, -1.4658, 2.5359],
                                    [-0.9881, -4.7599, -1.3857, 0.7167],
                                    [0.0974, -6.7093, -0.9697, 0.5599],
                                    [-0.5669, -0.6404, -0.5265, 1.0032],
                                    [0.4302, -4.5981, -1.4741, 0.0556]])
    expected_rot_mat_T = torch.tensor([[0.9997, -0.0230, 0.0000],
                                       [0.0230, 0.9997, 0.0000],
                                       [0.0000, 0.0000, 1.0000]])
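    # the returned rot_mat_T is applied on the right (points[:, :3] @ rot_mat_T),
    # e.g. first x: -0.6762 * 0.9997 + (-1.2559) * 0.0230 = -0.7049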
    assert torch.allclose(boxes_rot.tensor, expected_tensor, 1e-3)
    assert torch.allclose(points, expected_points, 1e-3)
    assert torch.allclose(rot_mat_T, expected_rot_mat_T, 1e-3)

    # with input torch.Tensor points and rotation matrix
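    # rotate the points back with the positive angle, then apply an equivalent
    # rotation matrix; the returned rot_mat_T should equal rot_mat transposed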
    points, rot_mat_T = boxes.rotate(0.022998953275003075, points)  # back
    rot_mat = np.array([[0.99973554, 0.02299693, 0.],
                        [-0.02299693, 0.99973554, 0.], [0., 0., 1.]])
    points, rot_mat_T = boxes.rotate(rot_mat, points)
    assert torch.allclose(boxes_rot.tensor, expected_tensor, 1e-3)
    assert torch.allclose(points, expected_points, 1e-3)
    assert torch.allclose(rot_mat_T, expected_rot_mat_T, 1e-3)

    # with input np.ndarray points and angle
    points_np = np.array([[0.6762, 1.2559, -1.4658, 2.5359],
                          [0.8784, 4.7814, -1.3857, 0.7167],
                          [-0.2517, 6.7053, -0.9697, 0.5599],
                          [0.5520, 0.6533, -0.5265, 1.0032],
                          [-0.5358, 4.5870, -1.4741, 0.0556]])
    points_np, rot_mat_T_np = boxes.rotate(-0.022998953275003075, points_np)
    expected_points_np = np.array([[0.7049, 1.2400, -1.4658, 2.5359],
                                   [0.9881, 4.7599, -1.3857, 0.7167],
                                   [-0.0974, 6.7093, -0.9697, 0.5599],
                                   [0.5669, 0.6404, -0.5265, 1.0032],
                                   [-0.4302, 4.5981, -1.4741, 0.0556]])
    expected_rot_mat_T_np = np.array([[0.9997, -0.0230, 0.0000],
                                      [0.0230, 0.9997, 0.0000],
                                      [0.0000, 0.0000, 1.0000]])
    expected_tensor = torch.tensor(
        [[-1.5434, -2.4951, -0.5570, 0.9385, 2.1404, 0.8954, -0.0585],
         [-2.4016, -3.2521, 0.4426, 0.8234, 0.5325, 1.0099, -0.1215],
         [-2.5181, -2.5298, -0.4321, 0.8597, 0.6193, 1.0204, -0.0493],
         [-1.5434, -2.4951, -0.5570, 0.9385, 2.1404, 0.8954, -0.0585]])
    assert torch.allclose(boxes.tensor, expected_tensor, 1e-3)
    assert np.allclose(points_np, expected_points_np, 1e-3)
    assert np.allclose(rot_mat_T_np, expected_rot_mat_T_np, 1e-3)

    # with input DepthPoints and rotation matrix
    points_np, rot_mat_T_np = boxes.rotate(0.022998953275003075, points_np)
    depth_points = DepthPoints(points_np, points_dim=4)
    depth_points, rot_mat_T_np = boxes.rotate(rot_mat, depth_points)
    points_np = depth_points.tensor.numpy()
    assert torch.allclose(boxes.tensor, expected_tensor, 1e-3)
    assert np.allclose(points_np, expected_points_np, 1e-3)
    assert np.allclose(rot_mat_T_np, expected_rot_mat_T_np, 1e-3)

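    # test boxes with box_dim=6 and with_yaw=False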
    th_boxes = torch.tensor(
        [[0.61211395, 0.8129094, 0.10563634, 1.497534, 0.16927195, 0.27956772],
         [1.430009, 0.49797538, 0.9382923, 0.07694054, 0.9312509, 1.8919173]],
        dtype=torch.float32)
    boxes = DepthInstance3DBoxes(th_boxes, box_dim=6, with_yaw=False)
    expected_tensor = torch.tensor(
        [[0.64884546, 0.78390356, 0.10563634, 1.50373348, 0.23795205,
          0.27956772, 0],
         [1.45139421, 0.43169443, 0.93829232, 0.11967964, 0.93380373,
          1.89191735, 0]])
    boxes_3 = boxes.clone()
    boxes_3.rotate(-0.04599790655000615)
    assert torch.allclose(boxes_3.tensor, expected_tensor)
    boxes.rotate(torch.tensor(-0.04599790655000615))
    assert torch.allclose(boxes.tensor, expected_tensor)

    # test bbox in_range_bev
    expected_tensor = torch.tensor([1, 1], dtype=torch.bool)
    mask = boxes.in_range_bev([0., -40., 70.4, 40.])
    assert (mask == expected_tensor).all()
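    # nonempty() returns a mask of boxes whose dimensions are all positive;
    # both boxes qualify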
    mask = boxes.nonempty()
    assert (mask == expected_tensor).all()

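    # test corners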
    expected_tensor = torch.tensor([[[-0.1030, 0.6649, 0.1056],
                                     [-0.1030, 0.6649, 0.3852],
                                     [-0.1030, 0.9029, 0.3852],
                                     [-0.1030, 0.9029, 0.1056],
                                     [1.4007, 0.6649, 0.1056],
                                     [1.4007, 0.6649, 0.3852],
                                     [1.4007, 0.9029, 0.3852],
                                     [1.4007, 0.9029, 0.1056]],
                                    [[1.3916, -0.0352, 0.9383],
                                     [1.3916, -0.0352, 2.8302],
                                     [1.3916, 0.8986, 2.8302],
                                     [1.3916, 0.8986, 0.9383],
                                     [1.5112, -0.0352, 0.9383],
                                     [1.5112, -0.0352, 2.8302],
                                     [1.5112, 0.8986, 2.8302],
                                     [1.5112, 0.8986, 0.9383]]])
    assert torch.allclose(boxes.corners, expected_tensor, 1e-3)

    # test points in boxes
    if torch.cuda.is_available():
        box_idxs_of_pts = boxes.points_in_boxes(points.cuda())
        expected_idxs_of_pts = torch.tensor(
            [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0]],
            device='cuda:0',
            dtype=torch.int32)
        assert torch.all(box_idxs_of_pts == expected_idxs_of_pts)

    # test get_surface_line_center
    boxes = torch.tensor(
        [[0.3294, 1.0359, 0.1171, 1.0822, 1.1247, 1.3721, 0.4916],
         [-2.4630, -2.6324, -0.1616, 0.9202, 1.7896, 0.1992, 0.3185]])
    boxes = DepthInstance3DBoxes(
        boxes, box_dim=boxes.shape[-1], with_yaw=True, origin=(0.5, 0.5, 0.5))
    surface_center, line_center = boxes.get_surface_line_center()
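    # centers of the 6 faces and 12 edges per box, so for these two boxes the
    # expected shapes are (12, 3) and (24, 3)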

    expected_surface_center = torch.tensor([[0.3294, 1.0359, 0.8031],
                                            [0.3294, 1.0359, -0.5689],
                                            [0.5949, 1.5317, 0.1171],
                                            [0.1533, 0.5018, 0.1171],
                                            [0.8064, 0.7805, 0.1171],
                                            [-0.1845, 1.2053, 0.1171],
                                            [-2.4630, -2.6324, -0.0620],
                                            [-2.4630, -2.6324, -0.2612],
                                            [-2.0406, -1.8436, -0.1616],
                                            [-2.7432, -3.4822, -0.1616],
                                            [-2.0574, -2.8496, -0.1616],
                                            [-2.9000, -2.4883, -0.1616]])

    expected_line_center = torch.tensor([[0.8064, 0.7805, 0.8031],
                                         [-0.1845, 1.2053, 0.8031],
                                         [0.5949, 1.5317, 0.8031],
                                         [0.1533, 0.5018, 0.8031],
                                         [0.8064, 0.7805, -0.5689],
                                         [-0.1845, 1.2053, -0.5689],
                                         [0.5949, 1.5317, -0.5689],
                                         [0.1533, 0.5018, -0.5689],
                                         [1.0719, 1.2762, 0.1171],
                                         [0.6672, 0.3324, 0.1171],
                                         [0.1178, 1.7871, 0.1171],
                                         [-0.3606, 0.6713, 0.1171],
                                         [-2.0574, -2.8496, -0.0620],
                                         [-2.9000, -2.4883, -0.0620],
                                         [-2.0406, -1.8436, -0.0620],
                                         [-2.7432, -3.4822, -0.0620],
                                         [-2.0574, -2.8496, -0.2612],
                                         [-2.9000, -2.4883, -0.2612],
                                         [-2.0406, -1.8436, -0.2612],
                                         [-2.7432, -3.4822, -0.2612],
                                         [-1.6350, -2.0607, -0.1616],
                                         [-2.3062, -3.6263, -0.1616],
                                         [-2.4462, -1.6264, -0.1616],
                                         [-3.1802, -3.3381, -0.1616]])

    assert torch.allclose(surface_center, expected_surface_center, atol=1e-04)
    assert torch.allclose(line_center, expected_line_center, atol=1e-04)


def test_rotation_3d_in_axis():
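    # rotate each of the two (3, 3) point sets by its own angle around axis 0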
    points = torch.tensor([[[-0.4599, -0.0471, 0.0000],
                            [-0.4599, -0.0471, 1.8433],
                            [-0.4599, 0.0471, 1.8433]],
                           [[-0.2555, -0.2683, 0.0000],
                            [-0.2555, -0.2683, 0.9072],
                            [-0.2555, 0.2683, 0.9072]]])
    rotated = rotation_3d_in_axis(
        points, torch.tensor([-np.pi / 10, np.pi / 10]), axis=0)
    expected_rotated = torch.tensor([[[0.0000, -0.4228, -0.1869],
                                      [1.8433, -0.4228, -0.1869],
                                      [1.8433, -0.4519, -0.0973]],
                                     [[0.0000, -0.3259, -0.1762],
                                      [0.9072, -0.3259, -0.1762],
                                      [0.9072, -0.1601, 0.3341]]])
    assert torch.allclose(rotated, expected_rotated, 1e-3)


def test_limit_period():
    torch.manual_seed(0)
    val = torch.rand([5, 1])
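    # with the default offset=0.5 and period=np.pi, limit_period maps values
    # into [-pi / 2, pi / 2); the sampled values already lie in that range,
    # so they should come back unchanged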
    result = limit_period(val)
    expected_result = torch.tensor([[0.4963], [0.7682], [0.0885], [0.1320],
                                    [0.3074]])
    assert torch.allclose(result, expected_result, 1e-3)


def test_xywhr2xyxyr():
    torch.manual_seed(0)
    xywhr = torch.tensor([[1., 2., 3., 4., 5.], [0., 1., 2., 3., 4.]])
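    # [x, y, w, h, r] -> [x - w / 2, y - h / 2, x + w / 2, y + h / 2, r],
    # e.g. the first box: [1, 2, 3, 4, 5] -> [-0.5, 0.0, 2.5, 4.0, 5.0]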
    xyxyr = xywhr2xyxyr(xywhr)
    expected_xyxyr = torch.tensor([[-0.5000, 0.0000, 2.5000, 4.0000, 5.0000],
                                   [-1.0000, -0.5000, 1.0000, 2.5000, 4.0000]])

    assert torch.allclose(xyxyr, expected_xyxyr)


class TestGetBoxType(unittest.TestCase):

    def test_get_box_type(self):
        box_type_3d, box_mode_3d = get_box_type('camera')
        assert box_type_3d == CameraInstance3DBoxes
        assert box_mode_3d == Box3DMode.CAM

        box_type_3d, box_mode_3d = get_box_type('depth')
        assert box_type_3d == DepthInstance3DBoxes
        assert box_mode_3d == Box3DMode.DEPTH

        box_type_3d, box_mode_3d = get_box_type('lidar')
        assert box_type_3d == LiDARInstance3DBoxes
        assert box_mode_3d == Box3DMode.LIDAR

    def test_bad_box_type(self):
        self.assertRaises(ValueError, get_box_type, 'test')


def test_points_cam2img():
    torch.manual_seed(0)
    points = torch.rand([5, 3])
    proj_mat = torch.rand([4, 4])
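    # points_cam2img projects camera-frame points with the 4x4 matrix in
    # homogeneous coordinates, then divides by the projected depth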
    point_2d_res = points_cam2img(points, proj_mat)
    expected_point_2d_res = torch.tensor([[0.5832, 0.6496], [0.6146, 0.7910],
                                          [0.6994, 0.7782], [0.5623, 0.6303],
                                          [0.4359, 0.6532]])
    assert torch.allclose(point_2d_res, expected_point_2d_res, 1e-3)