Commit 2eebdc2d authored by Yezhen Cong, committed by Tai-Wang

[Refactor] Main code modification for coordinate system refactor (#677)

parent 26ab7ff2
@@ -132,8 +132,12 @@ def test_object_noise():
     input_dict = object_noise(input_dict)
     points = input_dict['points']
     gt_bboxes_3d = input_dict['gt_bboxes_3d'].tensor
-    expected_gt_bboxes_3d = torch.tensor(
-        [[9.1724, -1.7559, -1.3550, 0.4800, 1.2000, 1.8900, 0.0505]])
+    # coord sys refactor (lidar2cam)
+    expected_gt_bboxes_3d = torch.tensor([[
+        9.1724, -1.7559, -1.3550, 1.2000, 0.4800, 1.8900,
+        0.0505 - float(rots) * 2 - np.pi / 2
+    ]])
     repr_str = repr(object_noise)
     expected_repr_str = 'ObjectNoise(num_try=100, ' \
                         'translation_std=[0.25, 0.25, 0.25], ' \
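A note on the pattern these new expected values follow: the box extents swap their first two entries and the yaw gains a `- np.pi / 2` term with a sign change, consistent with the refactored LiDAR convention `(x, y, z, x_size, y_size, z_size, yaw)` with yaw counter-clockwise around +z. A hedged sketch of that static remapping (editor's illustration, not the library's official converter; the extra `- float(rots) * 2` term above comes from this test's noise rotation, which the sketch ignores):

```python
import numpy as np

def legacy_lidar_box_to_refactored(box):
    # Assumed mapping: (x, y, z, w, l, h, yaw_old) -> (x, y, z, l, w, h, yaw_new)
    # with yaw_new = -yaw_old - np.pi / 2; treat as illustrative only.
    x, y, z, w, l, h, yaw = box
    return [x, y, z, l, w, h, -yaw - np.pi / 2]

# dims 0.48 / 1.20 swap to 1.20 / 0.48, as in the expected tensor above
print(legacy_lidar_box_to_refactored(
    [9.1724, -1.7559, -1.3550, 0.48, 1.20, 1.89, 0.0505]))
```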
@@ -522,11 +526,11 @@ def test_random_flip_3d():
                                     [21.2334, -9.3607, -0.2588, 0.0000],
                                     [21.2179, -9.4372, -0.2598, 0.0000]])
     expected_gt_bboxes_3d = torch.tensor(
-        [[38.9229, -18.4417, -1.1459, 0.7100, 1.7600, 1.8600, 5.4068],
-         [12.7768, -0.5795, -2.2682, 0.5700, 0.9900, 1.7200, 5.6445],
-         [12.7557, -2.2996, -1.4869, 0.6100, 1.1100, 1.9000, 5.0806],
-         [10.6677, -0.8064, -1.5435, 0.7900, 0.9600, 1.7900, 2.0560],
-         [5.0903, -5.1004, -1.2694, 0.7100, 1.7000, 1.8300, 5.0552]])
+        [[38.9229, -18.4417, -1.1459, 0.7100, 1.7600, 1.8600, 2.2652],
+         [12.7768, -0.5795, -2.2682, 0.5700, 0.9900, 1.7200, 2.5029],
+         [12.7557, -2.2996, -1.4869, 0.6100, 1.1100, 1.9000, 1.9390],
+         [10.6677, -0.8064, -1.5435, 0.7900, 0.9600, 1.7900, -1.0856],
+         [5.0903, -5.1004, -1.2694, 0.7100, 1.7000, 1.8300, 1.9136]])
     repr_str = repr(random_flip_3d)
     expected_repr_str = 'RandomFlip3D(sync_2d=True,' \
                         ' flip_ratio_bev_vertical=1.0)'
...
@@ -316,10 +316,24 @@ def test_sunrgbd_pipeline():
                                 [0.8636, 1.3511, 0.0504, 0.0304],
                                 [0.8690, 1.3461, 0.1265, 0.1065],
                                 [0.8668, 1.3434, 0.1216, 0.1017]])
+    # Depth coordinate system update: only yaw changes, since rotation in
+    # Depth is counter-clockwise while the yaw angle was originally clockwise.
+    # Heading angles in the SUN RGB-D data also reverse the sign,
+    # and after the horizontal flip the sign reverses once more.
+    rotation_angle = info['annos']['rotation_y']
     expected_gt_bboxes_3d = torch.tensor(
-        [[-1.2136, 4.0206, -0.2412, 2.2493, 1.8444, 1.9245, 1.3989],
-         [-2.7420, 4.5777, -0.7686, 0.5718, 0.8629, 0.9510, 1.4446],
-         [0.9729, 1.9087, -0.1443, 0.6965, 1.5273, 2.0563, 2.9924]])
+        [[
+            -1.2136, 4.0206, -0.2412, 2.2493, 1.8444, 1.9245,
+            1.3989 + 0.047001579467984445 * 2 - 2 * rotation_angle[0]
+        ],
+         [
+             -2.7420, 4.5777, -0.7686, 0.5718, 0.8629, 0.9510,
+             1.4446 + 0.047001579467984445 * 2 - 2 * rotation_angle[1]
+         ],
+         [
+             0.9729, 1.9087, -0.1443, 0.6965, 1.5273, 2.0563,
+             2.9924 + 0.047001579467984445 * 2 - 2 * rotation_angle[2]
+         ]]).float()
     expected_gt_labels_3d = np.array([0, 7, 6])
     assert torch.allclose(gt_bboxes_3d.tensor, expected_gt_bboxes_3d, 1e-3)
    assert np.allclose(gt_labels_3d.flatten(), expected_gt_labels_3d)
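The net yaw arithmetic in the expected values above can be read as one closed-form transformation; a hypothetical helper restating it (name and signature are illustrative only, grounded purely in the expressions in this hunk):

```python
def expected_depth_yaw(old_yaw, pcd_rotation, annotation_yaw):
    # The global rotation offset (0.047001579467984445 in this test) enters
    # twice and the per-box annotation angle enters with factor -2, because
    # the yaw sign convention flipped while both rotations kept their data.
    return old_yaw + 2 * pcd_rotation - 2 * annotation_yaw

# e.g. for the first box above:
# expected_depth_yaw(1.3989, 0.047001579467984445, rotation_angle[0])
```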
...
@@ -38,63 +38,64 @@ def test_outdoor_aug_pipeline():
     ]
     pipeline = Compose(train_pipeline)
+    # coord sys refactor: reverse sign of yaw
     gt_bboxes_3d = LiDARInstance3DBoxes(
         torch.tensor([
             [
                 2.16902428e+01, -4.06038128e-02, -1.61906636e+00,
-                1.65999997e+00, 3.20000005e+00, 1.61000001e+00, -1.53999996e+00
+                1.65999997e+00, 3.20000005e+00, 1.61000001e+00, 1.53999996e+00
             ],
             [
                 7.05006886e+00, -6.57459593e+00, -1.60107934e+00,
-                2.27999997e+00, 1.27799997e+01, 3.66000009e+00, 1.54999995e+00
+                2.27999997e+00, 1.27799997e+01, 3.66000009e+00, -1.54999995e+00
             ],
             [
                 2.24698811e+01, -6.69203758e+00, -1.50118136e+00,
-                2.31999993e+00, 1.47299995e+01, 3.64000010e+00, 1.59000003e+00
+                2.31999993e+00, 1.47299995e+01, 3.64000010e+00, -1.59000003e+00
             ],
             [
                 3.48291969e+01, -7.09058380e+00, -1.36622977e+00,
-                2.31999993e+00, 1.00400000e+01, 3.60999990e+00, 1.61000001e+00
+                2.31999993e+00, 1.00400000e+01, 3.60999990e+00, -1.61000001e+00
             ],
             [
                 4.62394600e+01, -7.75838804e+00, -1.32405007e+00,
-                2.33999991e+00, 1.28299999e+01, 3.63000011e+00, 1.63999999e+00
+                2.33999991e+00, 1.28299999e+01, 3.63000011e+00, -1.63999999e+00
             ],
             [
                 2.82966995e+01, -5.55755794e-01, -1.30332506e+00,
-                1.47000003e+00, 2.23000002e+00, 1.48000002e+00, -1.57000005e+00
+                1.47000003e+00, 2.23000002e+00, 1.48000002e+00, 1.57000005e+00
             ],
             [
                 2.66690197e+01, 2.18230209e+01, -1.73605704e+00,
-                1.55999994e+00, 3.48000002e+00, 1.39999998e+00, -1.69000006e+00
+                1.55999994e+00, 3.48000002e+00, 1.39999998e+00, 1.69000006e+00
             ],
             [
                 3.13197803e+01, 8.16214371e+00, -1.62177873e+00,
-                1.74000001e+00, 3.76999998e+00, 1.48000002e+00, 2.78999996e+00
+                1.74000001e+00, 3.76999998e+00, 1.48000002e+00, -2.78999996e+00
             ],
             [
                 4.34395561e+01, -1.95209332e+01, -1.20757008e+00,
-                1.69000006e+00, 4.09999990e+00, 1.40999997e+00, -1.53999996e+00
+                1.69000006e+00, 4.09999990e+00, 1.40999997e+00, 1.53999996e+00
             ],
             [
                 3.29882965e+01, -3.79360509e+00, -1.69245458e+00,
-                1.74000001e+00, 4.09000015e+00, 1.49000001e+00, -1.52999997e+00
+                1.74000001e+00, 4.09000015e+00, 1.49000001e+00, 1.52999997e+00
             ],
             [
                 3.85469360e+01, 8.35060215e+00, -1.31423414e+00,
-                1.59000003e+00, 4.28000021e+00, 1.45000005e+00, 1.73000002e+00
+                1.59000003e+00, 4.28000021e+00, 1.45000005e+00, -1.73000002e+00
             ],
             [
                 2.22492104e+01, -1.13536005e+01, -1.38272512e+00,
-                1.62000000e+00, 3.55999994e+00, 1.71000004e+00, 2.48000002e+00
+                1.62000000e+00, 3.55999994e+00, 1.71000004e+00, -2.48000002e+00
             ],
             [
                 3.36115799e+01, -1.97708054e+01, -4.92827654e-01,
-                1.64999998e+00, 3.54999995e+00, 1.79999995e+00, -1.57000005e+00
+                1.64999998e+00, 3.54999995e+00, 1.79999995e+00, 1.57000005e+00
             ],
             [
                 9.85029602e+00, -1.51294518e+00, -1.66834795e+00,
-                1.59000003e+00, 3.17000008e+00, 1.38999999e+00, -8.39999974e-01
+                1.59000003e+00, 3.17000008e+00, 1.38999999e+00, 8.39999974e-01
             ]
         ],
                      dtype=torch.float32))
@@ -105,23 +106,59 @@ def test_outdoor_aug_pipeline():
         bbox3d_fields=[],
         img_fields=[])
+    origin_center = gt_bboxes_3d.tensor[:, :3].clone()
+    origin_angle = gt_bboxes_3d.tensor[:, 6].clone()
     output = pipeline(results)
+    # manually go through the pipeline
+    rotation_angle = output['img_metas']._data['pcd_rotation_angle']
+    rotation_matrix = output['img_metas']._data['pcd_rotation']
+    noise_angle = torch.tensor([
+        0.70853819, -0.19160091, -0.71116999, 0.49571753, -0.12447527,
+        -0.4690133, -0.34776965, -0.65692282, -0.52442831, -0.01575567,
+        -0.61849673, 0.6572608, 0.30312288, -0.19182971
+    ])
+    noise_trans = torch.tensor([[1.7641e+00, 4.0016e-01, 4.8937e-01],
+                                [-1.3065e+00, 1.6581e+00, -5.9082e-02],
+                                [-1.5504e+00, 4.1732e-01, -4.7218e-01],
+                                [-5.2158e-01, -1.1847e+00, 4.8035e-01],
+                                [-8.9637e-01, -1.9627e+00, 7.9241e-01],
+                                [1.3240e-02, -1.2194e-01, 1.6953e-01],
+                                [8.1798e-01, -2.7891e-01, 7.1578e-01],
+                                [-4.1733e-04, 3.7416e-01, 2.0478e-01],
+                                [1.5218e-01, -3.7413e-01, -6.7257e-03],
+                                [-1.9138e+00, -2.2855e+00, -8.0092e-01],
+                                [1.5933e+00, 5.6872e-01, -5.7244e-02],
+                                [-1.8523e+00, -7.1333e-01, -8.8111e-01],
+                                [5.2678e-01, 1.0106e-01, -1.9432e-01],
+                                [-7.2449e-01, -8.0292e-01, -1.1334e-02]])
+    angle = -origin_angle - noise_angle + torch.tensor(rotation_angle)
+    angle -= 2 * np.pi * (angle >= np.pi)
+    angle += 2 * np.pi * (angle < -np.pi)
+    scale = output['img_metas']._data['pcd_scale_factor']
     expected_tensor = torch.tensor(
-        [[20.6514, -8.8250, -1.0816, 1.5893, 3.0637, 1.5414, -1.9216],
-         [7.9374, 4.9457, -1.2008, 2.1829, 12.2357, 3.5041, 1.6629],
-         [20.8115, -2.0273, -1.8893, 2.2212, 14.1026, 3.4850, 2.6513],
-         [32.3850, -5.2135, -1.1321, 2.2212, 9.6124, 3.4562, 2.6498],
-         [43.7022, -7.8316, -0.5090, 2.2403, 12.2836, 3.4754, 2.0146],
-         [25.3300, -9.6670, -1.0855, 1.4074, 2.1350, 1.4170, -0.7141],
-         [16.5414, -29.0583, -0.9768, 1.4936, 3.3318, 1.3404, -0.7153],
-         [24.6548, -18.9226, -1.3567, 1.6659, 3.6094, 1.4170, 1.3970],
-         [45.8403, 1.8183, -1.1626, 1.6180, 3.9254, 1.3499, -0.6886],
-         [30.6288, -8.4497, -1.4881, 1.6659, 3.9158, 1.4265, -0.7241],
-         [32.3316, -22.4611, -1.3131, 1.5223, 4.0977, 1.3882, 2.4186],
-         [22.4492, 3.2944, -2.1674, 1.5510, 3.4084, 1.6372, 0.3928],
-         [37.3824, 5.0472, -0.6579, 1.5797, 3.3988, 1.7233, -1.4862],
-         [8.9259, -1.2578, -1.6081, 1.5223, 3.0350, 1.3308, -1.7212]])
+        [[20.6514, -8.8250, -1.0816, 1.5893, 3.0637, 1.5414],
+         [7.9374, 4.9457, -1.2008, 2.1829, 12.2357, 3.5041],
+         [20.8115, -2.0273, -1.8893, 2.2212, 14.1026, 3.4850],
+         [32.3850, -5.2135, -1.1321, 2.2212, 9.6124, 3.4562],
+         [43.7022, -7.8316, -0.5090, 2.2403, 12.2836, 3.4754],
+         [25.3300, -9.6670, -1.0855, 1.4074, 2.1350, 1.4170],
+         [16.5414, -29.0583, -0.9768, 1.4936, 3.3318, 1.3404],
+         [24.6548, -18.9226, -1.3567, 1.6659, 3.6094, 1.4170],
+         [45.8403, 1.8183, -1.1626, 1.6180, 3.9254, 1.3499],
+         [30.6288, -8.4497, -1.4881, 1.6659, 3.9158, 1.4265],
+         [32.3316, -22.4611, -1.3131, 1.5223, 4.0977, 1.3882],
+         [22.4492, 3.2944, -2.1674, 1.5510, 3.4084, 1.6372],
+         [37.3824, 5.0472, -0.6579, 1.5797, 3.3988, 1.7233],
+         [8.9259, -1.2578, -1.6081, 1.5223, 3.0350, 1.3308]])
+    expected_tensor[:, :3] = ((
+        (origin_center + noise_trans) * torch.tensor([1, -1, 1]))
+                              @ rotation_matrix) * scale
+    expected_tensor = torch.cat([expected_tensor, angle.unsqueeze(-1)], dim=-1)
     assert torch.allclose(
         output['gt_bboxes_3d']._data.tensor, expected_tensor, atol=1e-3)
@@ -208,6 +245,11 @@ def test_outdoor_velocity_aug_pipeline():
         bbox3d_fields=[],
         img_fields=[])
+    origin_center = gt_bboxes_3d.tensor[:, :3].clone()
+    origin_angle = gt_bboxes_3d.tensor[:, 6].clone(
+    )  # TODO: ObjectNoise modifies tensor!!
+    origin_velo = gt_bboxes_3d.tensor[:, 7:9].clone()
     output = pipeline(results)
     expected_tensor = torch.tensor(
@@ -247,5 +289,21 @@ def test_outdoor_velocity_aug_pipeline():
             -4.4522e+00, -2.9166e+01, -7.8938e-01, 2.2841e+00, 3.8348e+00,
             1.5925e+00, 1.4721e+00, -7.8371e-03, -8.1931e-03
         ]])
+    # coord sys refactor (manually go through pipeline)
+    rotation_angle = output['img_metas']._data['pcd_rotation_angle']
+    rotation_matrix = output['img_metas']._data['pcd_rotation']
+    expected_tensor[:, :3] = ((origin_center @ rotation_matrix) *
+                              output['img_metas']._data['pcd_scale_factor'] *
+                              torch.tensor([1, -1, 1]))[[
+                                  0, 1, 2, 3, 4, 5, 6, 7, 9
+                              ]]
+    angle = -origin_angle - rotation_angle
+    angle -= 2 * np.pi * (angle >= np.pi)
+    angle += 2 * np.pi * (angle < -np.pi)
+    expected_tensor[:, 6:7] = angle.unsqueeze(-1)[[0, 1, 2, 3, 4, 5, 6, 7, 9]]
+    expected_tensor[:,
+                    7:9] = ((origin_velo @ rotation_matrix[:2, :2]) *
+                            output['img_metas']._data['pcd_scale_factor'] *
+                            torch.tensor([1, -1]))[[0, 1, 2, 3, 4, 5, 6, 7, 9]]
     assert torch.allclose(
         output['gt_bboxes_3d']._data.tensor, expected_tensor, atol=1e-3)
 # Copyright (c) OpenMMLab. All rights reserved.
+import numpy as np
 import pytest
 import torch
@@ -16,8 +17,8 @@ def test_RoIAwarePool3d():
     roiaware_pool3d_avg = RoIAwarePool3d(
         out_size=4, max_pts_per_voxel=128, mode='avg')
     rois = torch.tensor(
-        [[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 0.3],
-         [-10.0, 23.0, 16.0, 10, 20, 20, 0.5]],
+        [[1.0, 2.0, 3.0, 5.0, 4.0, 6.0, -0.3 - np.pi / 2],
+         [-10.0, 23.0, 16.0, 20.0, 10.0, 20.0, -0.5 - np.pi / 2]],
         dtype=torch.float32).cuda(
     )  # boxes (m, 7) with bottom center in lidar coordinate
     pts = torch.tensor(
@@ -64,6 +65,17 @@ def test_points_in_boxes_gpu():
     assert point_indices.shape == torch.Size([2, 8])
     assert (point_indices == expected_point_indices).all()
+    boxes = torch.tensor([[[0.0, 0.0, 0.0, 1.0, 20.0, 1.0, 0.523598]]],
+                         dtype=torch.float32).cuda()  # 30 degrees
+    pts = torch.tensor(
+        [[[4, 6.928, 0], [6.928, 4, 0], [4, -6.928, 0], [6.928, -4, 0],
+          [-4, 6.928, 0], [-6.928, 4, 0], [-4, -6.928, 0], [-6.928, -4, 0]]],
+        dtype=torch.float32).cuda()
+    point_indices = points_in_boxes_gpu(points=pts, boxes=boxes)
+    expected_point_indices = torch.tensor([[-1, -1, 0, -1, 0, -1, -1, -1]],
+                                          dtype=torch.int32).cuda()
+    assert (point_indices == expected_point_indices).all()
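The 30-degree case above is easy to verify by hand: rotate each point into the box frame and compare against the half-extents. An illustrative reference check (editor's sketch, not the CUDA kernel itself; box layout `(x, y, z, dx, dy, dz, yaw)` with bottom-center origin, as in these tests):

```python
import math

def point_in_bev_box(pt, box):
    # Rotate the xy offset by -yaw into the box frame, then box-test it.
    x, y, z, dx, dy, dz, yaw = box
    ox, oy = pt[0] - x, pt[1] - y
    local_x = ox * math.cos(yaw) + oy * math.sin(yaw)
    local_y = -ox * math.sin(yaw) + oy * math.cos(yaw)
    return (abs(local_x) < dx / 2 and abs(local_y) < dy / 2
            and 0 <= pt[2] - z < dz)

box = (0.0, 0.0, 0.0, 1.0, 20.0, 1.0, 0.523598)  # 30 degrees
assert point_in_bev_box((4, -6.928, 0), box)     # index 0 in the test above
assert not point_in_bev_box((4, 6.928, 0), box)  # -1 in the test above
```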
     if torch.cuda.device_count() > 1:
         pts = pts.to('cuda:1')
         boxes = boxes.to('cuda:1')
@@ -75,23 +87,35 @@ def test_points_in_boxes_cpu():
 def test_points_in_boxes_cpu():
     boxes = torch.tensor(
-        [[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 0.3],
-         [-10.0, 23.0, 16.0, 10, 20, 20, 0.5]],
+        [[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 0.3],
+          [-10.0, 23.0, 16.0, 10, 20, 20, 0.5]]],
         dtype=torch.float32
     )  # boxes (m, 7) with bottom center in lidar coordinate
     pts = torch.tensor(
-        [[1, 2, 3.3], [1.2, 2.5, 3.0], [0.8, 2.1, 3.5], [1.6, 2.6, 3.6],
-         [0.8, 1.2, 3.9], [-9.2, 21.0, 18.2], [3.8, 7.9, 6.3],
-         [4.7, 3.5, -12.2], [3.8, 7.6, -2], [-10.6, -12.9, -20], [-16, -18, 9],
-         [-21.3, -52, -5], [0, 0, 0], [6, 7, 8], [-2, -3, -4]],
+        [[[1, 2, 3.3], [1.2, 2.5, 3.0], [0.8, 2.1, 3.5], [1.6, 2.6, 3.6],
+          [0.8, 1.2, 3.9], [-9.2, 21.0, 18.2], [3.8, 7.9, 6.3],
+          [4.7, 3.5, -12.2], [3.8, 7.6, -2], [-10.6, -12.9, -20], [
+              -16, -18, 9
+          ], [-21.3, -52, -5], [0, 0, 0], [6, 7, 8], [-2, -3, -4]]],
         dtype=torch.float32)  # points (n, 3) in lidar coordinate
     point_indices = points_in_boxes_cpu(points=pts, boxes=boxes)
     expected_point_indices = torch.tensor(
-        [[1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
-         [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
+        [[[1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [0, 1], [0, 0], [0, 0],
+          [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]],
         dtype=torch.int32)
-    assert point_indices.shape == torch.Size([2, 15])
+    assert point_indices.shape == torch.Size([1, 15, 2])
+    assert (point_indices == expected_point_indices).all()
+
+    boxes = torch.tensor([[[0.0, 0.0, 0.0, 1.0, 20.0, 1.0, 0.523598]]],
+                         dtype=torch.float32)  # 30 degrees
+    pts = torch.tensor(
+        [[[4, 6.928, 0], [6.928, 4, 0], [4, -6.928, 0], [6.928, -4, 0],
+          [-4, 6.928, 0], [-6.928, 4, 0], [-4, -6.928, 0], [-6.928, -4, 0]]],
+        dtype=torch.float32)
+    point_indices = points_in_boxes_cpu(points=pts, boxes=boxes)
+    expected_point_indices = torch.tensor(
+        [[[0], [0], [1], [0], [1], [0], [0], [0]]], dtype=torch.int32)
     assert (point_indices == expected_point_indices).all()
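As the asserts above encode, the two ops return differently shaped results; a quick side-by-side using the 30-degree tensors from these tests (sketch; the GPU call of course needs a CUDA device):

```python
# points_in_boxes_gpu -> (B, npoints): index of the box containing each
# point, or -1; points_in_boxes_cpu -> (B, npoints, nboxes): a 0/1
# indicator per point-box pair.
idx = points_in_boxes_gpu(points=pts.cuda(), boxes=boxes.cuda())  # (1, 8)
ind = points_in_boxes_cpu(points=pts, boxes=boxes)                # (1, 8, 1)
```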
...
@@ -76,7 +76,7 @@ def test_loss():
         2.0579e-02, 1.5005e-04, 3.5252e-05, 0.0000e+00, 2.0433e-05, 1.5422e-05
     ])
     expected_loss_bbox = torch.as_tensor(0.0622)
-    expected_loss_corner = torch.Tensor([0.1379])
+    expected_loss_corner = torch.Tensor([0.1374])
     assert torch.allclose(loss['loss_cls'], expected_loss_cls, 1e-3)
     assert torch.allclose(loss['loss_bbox'], expected_loss_bbox, 1e-3)
@@ -201,7 +201,7 @@ def test_get_targets():
     ])
     expected_bbox_targets = torch.Tensor(
-        [[0.0805, 0.0130, 0.0047, 0.0542, -0.2252, 0.0299, -0.1495]])
+        [[-0.0632, 0.0516, 0.0047, 0.0542, -0.2252, 0.0299, -0.1495]])
     expected_pos_gt_bboxes = torch.Tensor(
         [[7.8417, -0.1405, -1.9652, 1.6122, 3.2838, 1.5331, -2.0835]])
@@ -345,12 +345,11 @@ def test_get_bboxes():
     selected_bboxes, selected_scores, selected_label_preds = result_list[0]
     expected_selected_bboxes = torch.Tensor(
-        [[56.2170, 25.9074, -1.3610, 1.6025, 3.6730, 1.5128, -0.1179],
-         [54.6521, 28.8846, -1.9145, 1.6362, 4.0573, 1.5599, -1.7335],
-         [31.6179, -5.6004, -1.2470, 1.6458, 4.1622, 1.5632, -1.5734]]).cuda()
+        [[56.0888, 25.6445, -1.3610, 1.6025, 3.6730, 1.5128, -0.1179],
+         [54.4606, 29.2412, -1.9145, 1.6362, 4.0573, 1.5599, -1.7335],
+         [31.8887, -5.8574, -1.2470, 1.6458, 4.1622, 1.5632, -1.5734]]).cuda()
     expected_selected_scores = torch.Tensor([-2.2061, -2.1121, -0.1761]).cuda()
     expected_selected_label_preds = torch.Tensor([2., 2., 2.]).cuda()
     assert torch.allclose(selected_bboxes.tensor, expected_selected_bboxes,
                           1e-3)
     assert torch.allclose(selected_scores, expected_selected_scores, 1e-3)
@@ -387,43 +386,43 @@ def test_multi_class_nms():
     box_preds = torch.Tensor(
         [[
             5.6217e+01, 2.5908e+01, -1.3611e+00, 1.6025e+00, 3.6730e+00,
-            1.5129e+00, -1.1786e-01
+            1.5129e+00, 1.1786e-01
         ],
         [
             5.4653e+01, 2.8885e+01, -1.9145e+00, 1.6362e+00, 4.0574e+00,
-            1.5599e+00, -1.7335e+00
+            1.5599e+00, 1.7335e+00
         ],
        [
             5.5809e+01, 2.5686e+01, -1.4457e+00, 1.5939e+00, 3.8270e+00,
-            1.4997e+00, -2.9191e+00
+            1.4997e+00, 2.9191e+00
         ],
         [
             5.6107e+01, 2.6082e+01, -1.3557e+00, 1.5782e+00, 3.7444e+00,
-            1.5266e+00, 1.7707e-01
+            1.5266e+00, -1.7707e-01
         ],
         [
             3.1618e+01, -5.6004e+00, -1.2470e+00, 1.6459e+00, 4.1622e+00,
-            1.5632e+00, -1.5734e+00
+            1.5632e+00, 1.5734e+00
         ],
         [
             3.1605e+01, -5.6342e+00, -1.2467e+00, 1.6474e+00, 4.1519e+00,
-            1.5481e+00, -1.6313e+00
+            1.5481e+00, 1.6313e+00
         ],
         [
             5.6211e+01, 2.7294e+01, -1.5350e+00, 1.5422e+00, 3.7733e+00,
-            1.5140e+00, 9.5846e-02
+            1.5140e+00, -9.5846e-02
         ],
         [
             5.5907e+01, 2.7155e+01, -1.4712e+00, 1.5416e+00, 3.7611e+00,
-            1.5142e+00, -5.2059e-02
+            1.5142e+00, 5.2059e-02
         ],
         [
             5.4000e+01, 3.0585e+01, -1.6874e+00, 1.6495e+00, 4.0376e+00,
-            1.5554e+00, -1.7900e+00
+            1.5554e+00, 1.7900e+00
         ],
         [
             5.6007e+01, 2.6300e+01, -1.3945e+00, 1.5716e+00, 3.7064e+00,
-            1.4715e+00, -2.9639e+00
+            1.4715e+00, 2.9639e+00
         ]]).cuda()
     input_meta = dict(
...
 # Copyright (c) OpenMMLab. All rights reserved.
+import numpy as np
 import pytest
 import torch
@@ -21,8 +22,8 @@ def test_single_roiaware_extractor():
                          dtype=torch.float32).cuda()
     coordinate = feats.clone()
     batch_inds = torch.zeros(feats.shape[0]).cuda()
-    rois = torch.tensor([[0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 0.3],
-                         [0, -10.0, 23.0, 16.0, 10, 20, 20, 0.5]],
+    rois = torch.tensor([[0, 1.0, 2.0, 3.0, 5.0, 4.0, 6.0, -0.3 - np.pi / 2],
+                         [0, -10.0, 23.0, 16.0, 20, 10, 20, -0.5 - np.pi / 2]],
                         dtype=torch.float32).cuda()
     # test forward
     pooled_feats = self(feats, coordinate, batch_inds, rois)
...
@@ -53,11 +53,11 @@ def test_PointwiseSemanticHead():
     gt_bboxes = [
         LiDARInstance3DBoxes(
             torch.tensor(
-                [[6.4118, -3.4305, -1.7291, 1.7033, 3.4693, 1.6197, -0.9091]],
+                [[6.4118, -3.4305, -1.7291, 1.7033, 3.4693, 1.6197, 0.9091]],
                 dtype=torch.float32).cuda()),
         LiDARInstance3DBoxes(
             torch.tensor(
-                [[16.9107, 9.7925, -1.9201, 1.6097, 3.2786, 1.5307, -2.4056]],
+                [[16.9107, 9.7925, -1.9201, 1.6097, 3.2786, 1.5307, 2.4056]],
                 dtype=torch.float32).cuda())
     ]
     # batch size is 2 in the unit test
...
@@ -22,7 +22,7 @@ def test_anchor_3d_range_generator():
             [0, -39.68, -0.6, 70.4, 39.68, -0.6],
             [0, -39.68, -1.78, 70.4, 39.68, -1.78],
         ],
-        sizes=[[0.6, 0.8, 1.73], [0.6, 1.76, 1.73], [1.6, 3.9, 1.56]],
+        sizes=[[0.8, 0.6, 1.73], [1.76, 0.6, 1.73], [3.9, 1.6, 1.56]],
         rotations=[0, 1.57],
         reshape_out=False)
@@ -32,8 +32,8 @@ def test_anchor_3d_range_generator():
         '[[0, -39.68, -0.6, 70.4, 39.68, -0.6], ' \
         '[0, -39.68, -0.6, 70.4, 39.68, -0.6], ' \
         '[0, -39.68, -1.78, 70.4, 39.68, -1.78]],' \
-        '\nscales=[1],\nsizes=[[0.6, 0.8, 1.73], ' \
-        '[0.6, 1.76, 1.73], [1.6, 3.9, 1.56]],' \
+        '\nscales=[1],\nsizes=[[0.8, 0.6, 1.73], ' \
+        '[1.76, 0.6, 1.73], [3.9, 1.6, 1.56]],' \
         '\nrotations=[0, 1.57],\nreshape_out=False,' \
         '\nsize_per_range=True)'
     assert repr_str == expected_repr_str
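The anchor `sizes` in these hunks follow the same dimension reordering as the boxes: the first two entries swap. A one-line migration check derived directly from the diff above (illustrative, not an official helper):

```python
old_sizes = [[0.6, 0.8, 1.73], [0.6, 1.76, 1.73], [1.6, 3.9, 1.56]]
# swap the first two entries of each (w, l, h) triple to get (l, w, h)
new_sizes = [[l, w, h] for (w, l, h) in old_sizes]
assert new_sizes == [[0.8, 0.6, 1.73], [1.76, 0.6, 1.73], [3.9, 1.6, 1.56]]
```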
@@ -54,8 +54,8 @@ def test_aligned_anchor_generator():
         ranges=[[-51.2, -51.2, -1.80, 51.2, 51.2, -1.80]],
         scales=[1, 2, 4],
         sizes=[
-            [0.8660, 2.5981, 1.],  # 1.5/sqrt(3)
-            [0.5774, 1.7321, 1.],  # 1/sqrt(3)
+            [2.5981, 0.8660, 1.],  # 1.5/sqrt(3)
+            [1.7321, 0.5774, 1.],  # 1/sqrt(3)
             [1., 1., 1.],
             [0.4, 0.4, 1],
         ],
@@ -71,7 +71,7 @@ def test_aligned_anchor_generator():
     # check base anchors
     expected_grid_anchors = [
         torch.tensor([[
-            -51.0000, -51.0000, -1.8000, 0.8660, 2.5981, 1.0000, 0.0000,
+            -51.0000, -51.0000, -1.8000, 2.5981, 0.8660, 1.0000, 0.0000,
             0.0000, 0.0000
         ],
                       [
@@ -91,20 +91,20 @@ def test_aligned_anchor_generator():
                           0.0000, 0.0000, 0.0000
                       ],
                       [
-                          -49.4000, -51.0000, -1.8000, 0.5774, 1.7321, 1.0000,
+                          -49.4000, -51.0000, -1.8000, 1.7321, 0.5774, 1.0000,
                           1.5700, 0.0000, 0.0000
                       ],
                       [
-                          -49.0000, -51.0000, -1.8000, 0.5774, 1.7321, 1.0000,
+                          -49.0000, -51.0000, -1.8000, 1.7321, 0.5774, 1.0000,
                           0.0000, 0.0000, 0.0000
                       ],
                       [
-                          -48.6000, -51.0000, -1.8000, 0.8660, 2.5981, 1.0000,
+                          -48.6000, -51.0000, -1.8000, 2.5981, 0.8660, 1.0000,
                           1.5700, 0.0000, 0.0000
                       ]],
                      device=device),
         torch.tensor([[
-            -50.8000, -50.8000, -1.8000, 1.7320, 5.1962, 2.0000, 0.0000,
+            -50.8000, -50.8000, -1.8000, 5.1962, 1.7320, 2.0000, 0.0000,
             0.0000, 0.0000
         ],
                       [
@@ -124,20 +124,20 @@ def test_aligned_anchor_generator():
                          0.0000, 0.0000, 0.0000
                       ],
                       [
-                          -47.6000, -50.8000, -1.8000, 1.1548, 3.4642, 2.0000,
+                          -47.6000, -50.8000, -1.8000, 3.4642, 1.1548, 2.0000,
                           1.5700, 0.0000, 0.0000
                       ],
                       [
-                          -46.8000, -50.8000, -1.8000, 1.1548, 3.4642, 2.0000,
+                          -46.8000, -50.8000, -1.8000, 3.4642, 1.1548, 2.0000,
                           0.0000, 0.0000, 0.0000
                       ],
                       [
-                          -46.0000, -50.8000, -1.8000, 1.7320, 5.1962, 2.0000,
+                          -46.0000, -50.8000, -1.8000, 5.1962, 1.7320, 2.0000,
                           1.5700, 0.0000, 0.0000
                       ]],
                      device=device),
         torch.tensor([[
-            -50.4000, -50.4000, -1.8000, 3.4640, 10.3924, 4.0000, 0.0000,
+            -50.4000, -50.4000, -1.8000, 10.3924, 3.4640, 4.0000, 0.0000,
             0.0000, 0.0000
         ],
                       [
@@ -157,15 +157,15 @@ def test_aligned_anchor_generator():
                          0.0000, 0.0000, 0.0000
                       ],
                       [
-                          -44.0000, -50.4000, -1.8000, 2.3096, 6.9284, 4.0000,
+                          -44.0000, -50.4000, -1.8000, 6.9284, 2.3096, 4.0000,
                           1.5700, 0.0000, 0.0000
                       ],
                       [
-                          -42.4000, -50.4000, -1.8000, 2.3096, 6.9284, 4.0000,
+                          -42.4000, -50.4000, -1.8000, 6.9284, 2.3096, 4.0000,
                           0.0000, 0.0000, 0.0000
                       ],
                       [
-                          -40.8000, -50.4000, -1.8000, 3.4640, 10.3924, 4.0000,
+                          -40.8000, -50.4000, -1.8000, 10.3924, 3.4640, 4.0000,
                           1.5700, 0.0000, 0.0000
                       ]],
                      device=device)
@@ -194,7 +194,7 @@ def test_aligned_anchor_generator_per_cls():
         type='AlignedAnchor3DRangeGeneratorPerCls',
         ranges=[[-100, -100, -1.80, 100, 100, -1.80],
                 [-100, -100, -1.30, 100, 100, -1.30]],
-        sizes=[[0.63, 1.76, 1.44], [0.96, 2.35, 1.59]],
+        sizes=[[1.76, 0.63, 1.44], [2.35, 0.96, 1.59]],
         custom_values=[0, 0],
         rotations=[0, 1.57],
         reshape_out=False)
@@ -205,20 +205,20 @@ def test_aligned_anchor_generator_per_cls():
     # check base anchors
     expected_grid_anchors = [[
         torch.tensor([[
-            -99.0000, -99.0000, -1.8000, 0.6300, 1.7600, 1.4400, 0.0000,
+            -99.0000, -99.0000, -1.8000, 1.7600, 0.6300, 1.4400, 0.0000,
             0.0000, 0.0000
         ],
                       [
-                          -99.0000, -99.0000, -1.8000, 0.6300, 1.7600, 1.4400,
+                          -99.0000, -99.0000, -1.8000, 1.7600, 0.6300, 1.4400,
                           1.5700, 0.0000, 0.0000
                       ]],
                      device=device),
         torch.tensor([[
-            -98.0000, -98.0000, -1.3000, 0.9600, 2.3500, 1.5900, 0.0000,
+            -98.0000, -98.0000, -1.3000, 2.3500, 0.9600, 1.5900, 0.0000,
             0.0000, 0.0000
         ],
                       [
-                          -98.0000, -98.0000, -1.3000, 0.9600, 2.3500, 1.5900,
+                          -98.0000, -98.0000, -1.3000, 2.3500, 0.9600, 1.5900,
                           1.5700, 0.0000, 0.0000
                       ]],
                      device=device)
...
@@ -140,10 +140,15 @@ def test_lidar_boxes3d():
     assert torch.allclose(expected_tensor, bottom_center_box.tensor)
     # Test init with numpy array
-    np_boxes = np.array(
-        [[1.7802081, 2.516249, -1.7501148, 1.75, 3.39, 1.65, 1.48],
-         [8.959413, 2.4567227, -1.6357126, 1.54, 4.01, 1.57, 1.62]],
-        dtype=np.float32)
+    np_boxes = np.array([[
+        1.7802081, 2.516249, -1.7501148, 1.75, 3.39, 1.65,
+        1.48 - 0.13603681398218053 * 4
+    ],
+                         [
+                             8.959413, 2.4567227, -1.6357126, 1.54, 4.01, 1.57,
+                             1.62 - 0.13603681398218053 * 4
+                         ]],
+                        dtype=np.float32)
     boxes_1 = LiDARInstance3DBoxes(np_boxes)
     assert torch.allclose(boxes_1.tensor, torch.from_numpy(np_boxes))
@@ -157,15 +162,15 @@ def test_lidar_boxes3d():
     th_boxes = torch.tensor(
         [[
             28.29669987, -0.5557558, -1.30332506, 1.47000003, 2.23000002,
-            1.48000002, -1.57000005
+            1.48000002, -1.57000005 - 0.13603681398218053 * 4
         ],
         [
             26.66901946, 21.82302134, -1.73605708, 1.55999994, 3.48000002,
-            1.39999998, -1.69000006
+            1.39999998, -1.69000006 - 0.13603681398218053 * 4
         ],
         [
             31.31977974, 8.16214412, -1.62177875, 1.74000001, 3.76999998,
-            1.48000002, 2.78999996
+            1.48000002, 2.78999996 - 0.13603681398218053 * 4
         ]],
         dtype=torch.float32)
     boxes_2 = LiDARInstance3DBoxes(th_boxes)
@@ -176,12 +181,30 @@ def test_lidar_boxes3d():
     boxes_1 = boxes_1.to(boxes_2.device)
     # test box concatenation
-    expected_tensor = torch.tensor(
-        [[1.7802081, 2.516249, -1.7501148, 1.75, 3.39, 1.65, 1.48],
-         [8.959413, 2.4567227, -1.6357126, 1.54, 4.01, 1.57, 1.62],
-         [28.2967, -0.5557558, -1.303325, 1.47, 2.23, 1.48, -1.57],
-         [26.66902, 21.82302, -1.736057, 1.56, 3.48, 1.4, -1.69],
-         [31.31978, 8.162144, -1.6217787, 1.74, 3.77, 1.48, 2.79]])
+    expected_tensor = torch.tensor([[
+        1.7802081, 2.516249, -1.7501148, 1.75, 3.39, 1.65,
+        1.48 - 0.13603681398218053 * 4
+    ],
+                                    [
+                                        8.959413, 2.4567227, -1.6357126, 1.54,
+                                        4.01, 1.57,
+                                        1.62 - 0.13603681398218053 * 4
+                                    ],
+                                    [
+                                        28.2967, -0.5557558, -1.303325, 1.47,
+                                        2.23, 1.48,
+                                        -1.57 - 0.13603681398218053 * 4
+                                    ],
+                                    [
+                                        26.66902, 21.82302, -1.736057, 1.56,
+                                        3.48, 1.4,
+                                        -1.69 - 0.13603681398218053 * 4
+                                    ],
+                                    [
+                                        31.31978, 8.162144, -1.6217787, 1.74,
+                                        3.77, 1.48,
+                                        2.79 - 0.13603681398218053 * 4
+                                    ]])
     boxes = LiDARInstance3DBoxes.cat([boxes_1, boxes_2])
     assert torch.allclose(boxes.tensor, expected_tensor)
     # concatenate empty list
@@ -196,11 +219,26 @@ def test_lidar_boxes3d():
                                     [0.6533, -0.5520, -0.5265],
                                     [4.5870, 0.5358, -1.4741]])
     expected_tensor = torch.tensor(
-        [[1.7802081, -2.516249, -1.7501148, 1.75, 3.39, 1.65, 1.6615927],
-         [8.959413, -2.4567227, -1.6357126, 1.54, 4.01, 1.57, 1.5215927],
-         [28.2967, 0.5557558, -1.303325, 1.47, 2.23, 1.48, 4.7115927],
-         [26.66902, -21.82302, -1.736057, 1.56, 3.48, 1.4, 4.8315926],
-         [31.31978, -8.162144, -1.6217787, 1.74, 3.77, 1.48, 0.35159278]])
+        [[
+            1.7802081, -2.516249, -1.7501148, 1.75, 3.39, 1.65,
+            1.6615927 - np.pi + 0.13603681398218053 * 4
+        ],
+         [
+             8.959413, -2.4567227, -1.6357126, 1.54, 4.01, 1.57,
+             1.5215927 - np.pi + 0.13603681398218053 * 4
+         ],
+         [
+             28.2967, 0.5557558, -1.303325, 1.47, 2.23, 1.48,
+             4.7115927 - np.pi + 0.13603681398218053 * 4
+         ],
+         [
+             26.66902, -21.82302, -1.736057, 1.56, 3.48, 1.4,
+             4.8315926 - np.pi + 0.13603681398218053 * 4
+         ],
+         [
+             31.31978, -8.162144, -1.6217787, 1.74, 3.77, 1.48,
+             0.35159278 - np.pi + 0.13603681398218053 * 4
+         ]])
     expected_points = torch.tensor([[1.2559, 0.6762, -1.4658],
                                     [4.7814, 0.8784, -1.3857],
                                     [6.7053, -0.2517, -0.9697],
@@ -211,11 +249,26 @@ def test_lidar_boxes3d():
     assert torch.allclose(points, expected_points, 1e-3)
     expected_tensor = torch.tensor(
-        [[-1.7802, -2.5162, -1.7501, 1.7500, 3.3900, 1.6500, -1.6616],
-         [-8.9594, -2.4567, -1.6357, 1.5400, 4.0100, 1.5700, -1.5216],
-         [-28.2967, 0.5558, -1.3033, 1.4700, 2.2300, 1.4800, -4.7116],
-         [-26.6690, -21.8230, -1.7361, 1.5600, 3.4800, 1.4000, -4.8316],
-         [-31.3198, -8.1621, -1.6218, 1.7400, 3.7700, 1.4800, -0.3516]])
+        [[
+            -1.7802, -2.5162, -1.7501, 1.7500, 3.3900, 1.6500,
+            -1.6616 + np.pi * 2 - 0.13603681398218053 * 4
+        ],
+         [
+             -8.9594, -2.4567, -1.6357, 1.5400, 4.0100, 1.5700,
+             -1.5216 + np.pi * 2 - 0.13603681398218053 * 4
+         ],
+         [
+             -28.2967, 0.5558, -1.3033, 1.4700, 2.2300, 1.4800,
+             -4.7116 + np.pi * 2 - 0.13603681398218053 * 4
+         ],
+         [
+             -26.6690, -21.8230, -1.7361, 1.5600, 3.4800, 1.4000,
+             -4.8316 + np.pi * 2 - 0.13603681398218053 * 4
+         ],
+         [
+             -31.3198, -8.1621, -1.6218, 1.7400, 3.7700, 1.4800,
+             -0.3516 + np.pi * 2 - 0.13603681398218053 * 4
+         ]])
     boxes_flip_vert = boxes.clone()
     points = boxes_flip_vert.flip('vertical', points)
     expected_points = torch.tensor([[-1.2559, 0.6762, -1.4658],
@@ -229,12 +282,27 @@ def test_lidar_boxes3d():
     # test box rotation
     # with input torch.Tensor points and angle
     expected_tensor = torch.tensor(
-        [[1.4225, -2.7344, -1.7501, 1.7500, 3.3900, 1.6500, 1.7976],
-         [8.5435, -3.6491, -1.6357, 1.5400, 4.0100, 1.5700, 1.6576],
-         [28.1106, -3.2869, -1.3033, 1.4700, 2.2300, 1.4800, 4.8476],
-         [23.4630, -25.2382, -1.7361, 1.5600, 3.4800, 1.4000, 4.9676],
-         [29.9235, -12.3342, -1.6218, 1.7400, 3.7700, 1.4800, 0.4876]])
-    points, rot_mat_T = boxes.rotate(0.13603681398218053, points)
+        [[
+            1.4225, -2.7344, -1.7501, 1.7500, 3.3900, 1.6500,
+            1.7976 - np.pi + 0.13603681398218053 * 2
+        ],
+         [
+             8.5435, -3.6491, -1.6357, 1.5400, 4.0100, 1.5700,
+             1.6576 - np.pi + 0.13603681398218053 * 2
+         ],
+         [
+             28.1106, -3.2869, -1.3033, 1.4700, 2.2300, 1.4800,
+             4.8476 - np.pi + 0.13603681398218053 * 2
+         ],
+         [
+             23.4630, -25.2382, -1.7361, 1.5600, 3.4800, 1.4000,
+             4.9676 - np.pi + 0.13603681398218053 * 2
+         ],
+         [
+             29.9235, -12.3342, -1.6218, 1.7400, 3.7700, 1.4800,
+             0.4876 - np.pi + 0.13603681398218053 * 2
+         ]])
+    points, rot_mat_T = boxes.rotate(-0.13603681398218053, points)
     expected_points = torch.tensor([[-1.1526, 0.8403, -1.4658],
                                     [-4.6181, 1.5187, -1.3857],
                                     [-6.6775, 0.6600, -0.9697],
@@ -248,7 +316,7 @@ def test_lidar_boxes3d():
     assert torch.allclose(rot_mat_T, expected_rot_mat_T, 1e-3)
     # with input torch.Tensor points and rotation matrix
-    points, rot_mat_T = boxes.rotate(-0.13603681398218053, points)  # back
+    points, rot_mat_T = boxes.rotate(0.13603681398218053, points)  # back
     rot_mat = np.array([[0.99076125, -0.13561762, 0.],
                         [0.13561762, 0.99076125, 0.], [0., 0., 1.]])
     points, rot_mat_T = boxes.rotate(rot_mat, points)
@@ -262,7 +330,7 @@ def test_lidar_boxes3d():
                           [-6.5263, 1.5595,
                            -0.9697], [-0.4809, 0.7073, -0.5265],
                           [-4.5623, 0.7166, -1.4741]])
-    points_np, rot_mat_T_np = boxes.rotate(0.13603681398218053, points_np)
+    points_np, rot_mat_T_np = boxes.rotate(-0.13603681398218053, points_np)
     expected_points_np = np.array([[-0.8844, 1.1191, -1.4658],
                                    [-4.0401, 2.7039, -1.3857],
                                    [-6.2545, 2.4302, -0.9697],
@@ -276,7 +344,7 @@ def test_lidar_boxes3d():
     assert np.allclose(rot_mat_T_np, expected_rot_mat_T_np, 1e-3)
     # with input LiDARPoints and rotation matrix
-    points_np, rot_mat_T_np = boxes.rotate(-0.13603681398218053, points_np)
+    points_np, rot_mat_T_np = boxes.rotate(0.13603681398218053, points_np)
     lidar_points = LiDARPoints(points_np)
     lidar_points, rot_mat_T_np = boxes.rotate(rot_mat, lidar_points)
     points_np = lidar_points.tensor.numpy()
@@ -287,27 +355,27 @@ def test_lidar_boxes3d():
     # test box scaling
     expected_tensor = torch.tensor([[
         1.0443488, -2.9183323, -1.7599131, 1.7597977, 3.4089797, 1.6592377,
-        1.9336663
+        1.9336663 - np.pi
     ],
                                     [
                                         8.014273, -4.8007393, -1.6448704,
                                         1.5486219, 4.0324507, 1.57879,
-                                        1.7936664
+                                        1.7936664 - np.pi
                                     ],
                                     [
                                         27.558605, -7.1084175, -1.310622,
                                         1.4782301, 2.242485, 1.488286,
-                                        4.9836664
+                                        4.9836664 - np.pi
                                     ],
                                     [
                                         19.934517, -28.344835, -1.7457767,
                                         1.5687338, 3.4994833, 1.4078381,
-                                        5.1036663
+                                        5.1036663 - np.pi
                                     ],
                                     [
                                         28.130915, -16.369587, -1.6308585,
                                         1.7497417, 3.791107, 1.488286,
-                                        0.6236664
+                                        0.6236664 - np.pi
                                     ]])
     boxes.scale(1.00559866335275)
     assert torch.allclose(boxes.tensor, expected_tensor)
@@ -315,27 +383,27 @@ def test_lidar_boxes3d():
     # test box translation
     expected_tensor = torch.tensor([[
         1.1281544, -3.0507944, -1.9169292, 1.7597977, 3.4089797, 1.6592377,
-        1.9336663
+        1.9336663 - np.pi
     ],
                                     [
                                         8.098079, -4.9332013, -1.8018866,
                                         1.5486219, 4.0324507, 1.57879,
-                                        1.7936664
+                                        1.7936664 - np.pi
                                     ],
                                     [
                                         27.64241, -7.2408795, -1.4676381,
                                         1.4782301, 2.242485, 1.488286,
-                                        4.9836664
+                                        4.9836664 - np.pi
                                     ],
                                     [
                                         20.018322, -28.477297, -1.9027928,
                                         1.5687338, 3.4994833, 1.4078381,
-                                        5.1036663
+                                        5.1036663 - np.pi
                                     ],
                                     [
                                         28.21472, -16.502048, -1.7878747,
                                         1.7497417, 3.791107, 1.488286,
-                                        0.6236664
+                                        0.6236664 - np.pi
                                     ]])
     boxes.translate([0.0838056, -0.13246193, -0.15701613])
     assert torch.allclose(boxes.tensor, expected_tensor)
@@ -356,17 +424,17 @@ def test_lidar_boxes3d():
     index_boxes = boxes[2:5]
     expected_tensor = torch.tensor([[
         27.64241, -7.2408795, -1.4676381, 1.4782301, 2.242485, 1.488286,
-        4.9836664
+        4.9836664 - np.pi
     ],
                                     [
                                         20.018322, -28.477297, -1.9027928,
                                         1.5687338, 3.4994833, 1.4078381,
-                                        5.1036663
+                                        5.1036663 - np.pi
                                     ],
                                     [
                                         28.21472, -16.502048, -1.7878747,
                                         1.7497417, 3.791107, 1.488286,
-                                        0.6236664
+                                        0.6236664 - np.pi
                                     ]])
     assert len(index_boxes) == 3
     assert torch.allclose(index_boxes.tensor, expected_tensor)
@@ -374,7 +442,7 @@ def test_lidar_boxes3d():
     index_boxes = boxes[2]
     expected_tensor = torch.tensor([[
         27.64241, -7.2408795, -1.4676381, 1.4782301, 2.242485, 1.488286,
-        4.9836664
+        4.9836664 - np.pi
     ]])
     assert len(index_boxes) == 1
     assert torch.allclose(index_boxes.tensor, expected_tensor)
@@ -382,12 +450,12 @@ def test_lidar_boxes3d():
     index_boxes = boxes[[2, 4]]
     expected_tensor = torch.tensor([[
         27.64241, -7.2408795, -1.4676381, 1.4782301, 2.242485, 1.488286,
-        4.9836664
+        4.9836664 - np.pi
     ],
                                     [
                                         28.21472, -16.502048, -1.7878747,
                                         1.7497417, 3.791107, 1.488286,
-                                        0.6236664
+                                        0.6236664 - np.pi
                                     ]])
     assert len(index_boxes) == 2
     assert torch.allclose(index_boxes.tensor, expected_tensor)
@@ -408,13 +476,13 @@ def test_lidar_boxes3d():
     assert (boxes.tensor[:, 6] >= -np.pi / 2).all()
     Box3DMode.convert(boxes, Box3DMode.LIDAR, Box3DMode.LIDAR)
-    expected_tesor = boxes.tensor.clone()
-    assert torch.allclose(expected_tesor, boxes.tensor)
+    expected_tensor = boxes.tensor.clone()
+    assert torch.allclose(expected_tensor, boxes.tensor)
     boxes.flip()
     boxes.flip()
     boxes.limit_yaw()
-    assert torch.allclose(expected_tesor, boxes.tensor)
+    assert torch.allclose(expected_tensor, boxes.tensor)
     # test nearest_bev
     expected_tensor = torch.tensor([[-0.5763, -3.9307, 2.8326, -2.1709],
@@ -422,52 +490,50 @@ def test_lidar_boxes3d():
                                     [26.5212, -7.9800, 28.7637, -6.5018],
                                     [18.2686, -29.2617, 21.7681, -27.6929],
                                     [27.3398, -18.3976, 29.0896, -14.6065]])
-    # the pytorch print loses some precision
     assert torch.allclose(
         boxes.nearest_bev, expected_tensor, rtol=1e-4, atol=1e-7)
-    # obtained by the print of the original implementation
-    expected_tensor = torch.tensor([[[2.4093e+00, -4.4784e+00, -1.9169e+00],
-                                     [2.4093e+00, -4.4784e+00, -2.5769e-01],
-                                     [-7.7767e-01, -3.2684e+00, -2.5769e-01],
-                                     [-7.7767e-01, -3.2684e+00, -1.9169e+00],
-                                     [3.0340e+00, -2.8332e+00, -1.9169e+00],
-                                     [3.0340e+00, -2.8332e+00, -2.5769e-01],
-                                     [-1.5301e-01, -1.6232e+00, -2.5769e-01],
-                                     [-1.5301e-01, -1.6232e+00, -1.9169e+00]],
-                                    [[9.8933e+00, -6.1340e+00, -1.8019e+00],
-                                     [9.8933e+00, -6.1340e+00, -2.2310e-01],
-                                     [5.9606e+00, -5.2427e+00, -2.2310e-01],
-                                     [5.9606e+00, -5.2427e+00, -1.8019e+00],
-                                     [1.0236e+01, -4.6237e+00, -1.8019e+00],
-                                     [1.0236e+01, -4.6237e+00, -2.2310e-01],
-                                     [6.3029e+00, -3.7324e+00, -2.2310e-01],
-                                     [6.3029e+00, -3.7324e+00, -1.8019e+00]],
-                                    [[2.8525e+01, -8.2534e+00, -1.4676e+00],
-                                     [2.8525e+01, -8.2534e+00, 2.0648e-02],
-                                     [2.6364e+01, -7.6525e+00, 2.0648e-02],
-                                     [2.6364e+01, -7.6525e+00, -1.4676e+00],
-                                     [2.8921e+01, -6.8292e+00, -1.4676e+00],
-                                     [2.8921e+01, -6.8292e+00, 2.0648e-02],
-                                     [2.6760e+01, -6.2283e+00, 2.0648e-02],
-                                     [2.6760e+01, -6.2283e+00, -1.4676e+00]],
-                                    [[2.1337e+01, -2.9870e+01, -1.9028e+00],
-                                     [2.1337e+01, -2.9870e+01, -4.9495e-01],
-                                     [1.8102e+01, -2.8535e+01, -4.9495e-01],
-                                     [1.8102e+01, -2.8535e+01, -1.9028e+00],
-                                     [2.1935e+01, -2.8420e+01, -1.9028e+00],
-                                     [2.1935e+01, -2.8420e+01, -4.9495e-01],
-                                     [1.8700e+01, -2.7085e+01, -4.9495e-01],
-                                     [1.8700e+01, -2.7085e+01, -1.9028e+00]],
-                                    [[2.6398e+01, -1.7530e+01, -1.7879e+00],
-                                     [2.6398e+01, -1.7530e+01, -2.9959e-01],
-                                     [2.8612e+01, -1.4452e+01, -2.9959e-01],
-                                     [2.8612e+01, -1.4452e+01, -1.7879e+00],
-                                     [2.7818e+01, -1.8552e+01, -1.7879e+00],
-                                     [2.7818e+01, -1.8552e+01, -2.9959e-01],
-                                     [3.0032e+01, -1.5474e+01, -2.9959e-01],
-                                     [3.0032e+01, -1.5474e+01, -1.7879e+00]]])
+    expected_tensor = torch.tensor([[[-7.7767e-01, -2.8332e+00, -1.9169e+00],
+                                     [-7.7767e-01, -2.8332e+00, -2.5769e-01],
+                                     [2.4093e+00, -1.6232e+00, -2.5769e-01],
+                                     [2.4093e+00, -1.6232e+00, -1.9169e+00],
+                                     [-1.5301e-01, -4.4784e+00, -1.9169e+00],
+                                     [-1.5301e-01, -4.4784e+00, -2.5769e-01],
+                                     [3.0340e+00, -3.2684e+00, -2.5769e-01],
+                                     [3.0340e+00, -3.2684e+00, -1.9169e+00]],
+                                    [[5.9606e+00, -4.6237e+00, -1.8019e+00],
+                                     [5.9606e+00, -4.6237e+00, -2.2310e-01],
+                                     [9.8933e+00, -3.7324e+00, -2.2310e-01],
+                                     [9.8933e+00, -3.7324e+00, -1.8019e+00],
+                                     [6.3029e+00, -6.1340e+00, -1.8019e+00],
+                                     [6.3029e+00, -6.1340e+00, -2.2310e-01],
+                                     [1.0236e+01, -5.2427e+00, -2.2310e-01],
+                                     [1.0236e+01, -5.2427e+00, -1.8019e+00]],
+                                    [[2.6364e+01, -6.8292e+00, -1.4676e+00],
+                                     [2.6364e+01, -6.8292e+00, 2.0648e-02],
+                                     [2.8525e+01, -6.2283e+00, 2.0648e-02],
+                                     [2.8525e+01, -6.2283e+00, -1.4676e+00],
+                                     [2.6760e+01, -8.2534e+00, -1.4676e+00],
+                                     [2.6760e+01, -8.2534e+00, 2.0648e-02],
+                                     [2.8921e+01, -7.6525e+00, 2.0648e-02],
+                                     [2.8921e+01, -7.6525e+00, -1.4676e+00]],
+                                    [[1.8102e+01, -2.8420e+01, -1.9028e+00],
+                                     [1.8102e+01, -2.8420e+01, -4.9495e-01],
+                                     [2.1337e+01, -2.7085e+01, -4.9495e-01],
+                                     [2.1337e+01, -2.7085e+01, -1.9028e+00],
+                                     [1.8700e+01, -2.9870e+01, -1.9028e+00],
+                                     [1.8700e+01, -2.9870e+01, -4.9495e-01],
+                                     [2.1935e+01, -2.8535e+01, -4.9495e-01],
+                                     [2.1935e+01, -2.8535e+01, -1.9028e+00]],
+                                    [[2.8612e+01, -1.8552e+01, -1.7879e+00],
+                                     [2.8612e+01, -1.8552e+01, -2.9959e-01],
+                                     [2.6398e+01, -1.5474e+01, -2.9959e-01],
+                                     [2.6398e+01, -1.5474e+01, -1.7879e+00],
+                                     [3.0032e+01, -1.7530e+01, -1.7879e+00],
+                                     [3.0032e+01, -1.7530e+01, -2.9959e-01],
+                                     [2.7818e+01, -1.4452e+01, -2.9959e-01],
+                                     [2.7818e+01, -1.4452e+01, -1.7879e+00]]])
-    # the pytorch print loses some precision
     assert torch.allclose(boxes.corners, expected_tensor, rtol=1e-4, atol=1e-7)
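The reordered corner values above come straight from the new yaw convention (counter-clockwise around +z, `dx` along the heading). A hedged BEV-only sketch of the corner computation (editor's illustration; the corner ordering here is arbitrary and need not match `boxes.corners`):

```python
import numpy as np

def bev_corners(x, y, dx, dy, yaw):
    # Scale a unit square by the box extents, rotate it counter-clockwise
    # by yaw, then shift it to the box center.
    template = np.array([[0.5, 0.5], [0.5, -0.5], [-0.5, -0.5], [-0.5, 0.5]])
    rot = np.array([[np.cos(yaw), -np.sin(yaw)],
                    [np.sin(yaw), np.cos(yaw)]])
    return (template * [dx, dy]) @ rot.T + [x, y]
```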
     # test new_box
@@ -558,26 +624,27 @@ def test_boxes_conversion():
          [0.000000e+00, 0.000000e+00, 0.000000e+00, 1.000000e+00]],
         dtype=torch.float32)
+    # coord sys refactor (reverse sign of yaw)
     expected_tensor = torch.tensor(
         [[
-            2.16902434e+01, -4.06038554e-02, -1.61906639e+00, 1.65999997e+00,
-            3.20000005e+00, 1.61000001e+00, -1.53999996e+00
+            2.16902434e+01, -4.06038554e-02, -1.61906639e+00, 3.20000005e+00,
+            1.65999997e+00, 1.61000001e+00, 1.53999996e+00 - np.pi / 2
         ],
         [
-            7.05006905e+00, -6.57459601e+00, -1.60107949e+00, 2.27999997e+00,
-            1.27799997e+01, 3.66000009e+00, 1.54999995e+00
+            7.05006905e+00, -6.57459601e+00, -1.60107949e+00, 1.27799997e+01,
+            2.27999997e+00, 3.66000009e+00, -1.54999995e+00 - np.pi / 2
         ],
         [
-            2.24698818e+01, -6.69203759e+00, -1.50118145e+00, 2.31999993e+00,
-            1.47299995e+01, 3.64000010e+00, 1.59000003e+00
+            2.24698818e+01, -6.69203759e+00, -1.50118145e+00, 1.47299995e+01,
+            2.31999993e+00, 3.64000010e+00, -1.59000003e+00 + 3 * np.pi / 2
         ],
         [
-            3.48291965e+01, -7.09058388e+00, -1.36622983e+00, 2.31999993e+00,
-            1.00400000e+01, 3.60999990e+00, 1.61000001e+00
+            3.48291965e+01, -7.09058388e+00, -1.36622983e+00, 1.00400000e+01,
+            2.31999993e+00, 3.60999990e+00, -1.61000001e+00 + 3 * np.pi / 2
         ],
         [
-            4.62394617e+01, -7.75838800e+00, -1.32405020e+00, 2.33999991e+00,
-            1.28299999e+01, 3.63000011e+00, 1.63999999e+00
+            4.62394617e+01, -7.75838800e+00, -1.32405020e+00, 1.28299999e+01,
+            2.33999991e+00, 3.63000011e+00, -1.63999999e+00 + 3 * np.pi / 2
         ]],
         dtype=torch.float32)
@@ -637,10 +704,15 @@ def test_camera_boxes3d():
 def test_camera_boxes3d():
     # Test init with numpy array
-    np_boxes = np.array(
-        [[1.7802081, 2.516249, -1.7501148, 1.75, 3.39, 1.65, 1.48],
-         [8.959413, 2.4567227, -1.6357126, 1.54, 4.01, 1.57, 1.62]],
-        dtype=np.float32)
+    np_boxes = np.array([[
+        1.7802081, 2.516249, -1.7501148, 1.75, 3.39, 1.65,
+        1.48 - 0.13603681398218053 * 4 - 2 * np.pi
+    ],
+                         [
+                             8.959413, 2.4567227, -1.6357126, 1.54, 4.01, 1.57,
+                             1.62 - 0.13603681398218053 * 4 - 2 * np.pi
+                         ]],
+                        dtype=np.float32)
     boxes_1 = Box3DMode.convert(
         LiDARInstance3DBoxes(np_boxes), Box3DMode.LIDAR, Box3DMode.CAM)
@@ -654,15 +726,15 @@ def test_camera_boxes3d():
     th_boxes = torch.tensor(
         [[
             28.29669987, -0.5557558, -1.30332506, 1.47000003, 2.23000002,
-            1.48000002, -1.57000005
+            1.48000002, -1.57000005 - 0.13603681398218053 * 4 - 2 * np.pi
         ],
         [
             26.66901946, 21.82302134, -1.73605708, 1.55999994, 3.48000002,
-            1.39999998, -1.69000006
+            1.39999998, -1.69000006 - 0.13603681398218053 * 4 - 2 * np.pi
         ],
         [
             31.31977974, 8.16214412, -1.62177875, 1.74000001, 3.76999998,
-            1.48000002, 2.78999996
+            1.48000002, 2.78999996 - 0.13603681398218053 * 4 - 2 * np.pi
         ]],
         dtype=torch.float32)
     cam_th_boxes = Box3DMode.convert(th_boxes, Box3DMode.LIDAR, Box3DMode.CAM)
@@ -675,13 +747,26 @@ def test_camera_boxes3d():
     # test box concatenation
     expected_tensor = Box3DMode.convert(
-        torch.tensor(
-            [[1.7802081, 2.516249, -1.7501148, 1.75, 3.39, 1.65, 1.48],
-             [8.959413, 2.4567227, -1.6357126, 1.54, 4.01, 1.57, 1.62],
-             [28.2967, -0.5557558, -1.303325, 1.47, 2.23, 1.48, -1.57],
-             [26.66902, 21.82302, -1.736057, 1.56, 3.48, 1.4, -1.69],
-             [31.31978, 8.162144, -1.6217787, 1.74, 3.77, 1.48, 2.79]]),
-        Box3DMode.LIDAR, Box3DMode.CAM)
+        torch.tensor([[
+            1.7802081, 2.516249, -1.7501148, 1.75, 3.39, 1.65,
+            1.48 - 0.13603681398218053 * 4 - 2 * np.pi
+        ],
+                      [
+                          8.959413, 2.4567227, -1.6357126, 1.54, 4.01, 1.57,
+                          1.62 - 0.13603681398218053 * 4 - 2 * np.pi
+                      ],
+                      [
+                          28.2967, -0.5557558, -1.303325, 1.47, 2.23, 1.48,
+                          -1.57 - 0.13603681398218053 * 4 - 2 * np.pi
+                      ],
+                      [
+                          26.66902, 21.82302, -1.736057, 1.56, 3.48, 1.4,
+                          -1.69 - 0.13603681398218053 * 4 - 2 * np.pi
+                      ],
+                      [
+                          31.31978, 8.162144, -1.6217787, 1.74, 3.77, 1.48,
+                          2.79 - 0.13603681398218053 * 4 - 2 * np.pi
+                      ]]), Box3DMode.LIDAR, Box3DMode.CAM)
     boxes = CameraInstance3DBoxes.cat([boxes_1, boxes_2])
     assert torch.allclose(boxes.tensor, expected_tensor)
@@ -690,28 +775,60 @@ def test_camera_boxes3d():
                            [-0.2517, 0.9697, 6.7053], [0.5520, 0.5265, 0.6533],
                            [-0.5358, 1.4741, 4.5870]])
     expected_tensor = Box3DMode.convert(
-        torch.tensor(
-            [[1.7802081, -2.516249, -1.7501148, 1.75, 3.39, 1.65, 1.6615927],
-             [8.959413, -2.4567227, -1.6357126, 1.54, 4.01, 1.57, 1.5215927],
-             [28.2967, 0.5557558, -1.303325, 1.47, 2.23, 1.48, 4.7115927],
-             [26.66902, -21.82302, -1.736057, 1.56, 3.48, 1.4, 4.8315926],
-             [31.31978, -8.162144, -1.6217787, 1.74, 3.77, 1.48, 0.35159278]]),
-        Box3DMode.LIDAR, Box3DMode.CAM)
+        torch.tensor([[
+            1.7802081, -2.516249, -1.7501148, 1.75, 3.39, 1.65,
+            1.6615927 + 0.13603681398218053 * 4 - np.pi
+        ],
+                      [
+                          8.959413, -2.4567227, -1.6357126, 1.54, 4.01, 1.57,
+                          1.5215927 + 0.13603681398218053 * 4 - np.pi
+                      ],
+                      [
+                          28.2967, 0.5557558, -1.303325, 1.47, 2.23, 1.48,
+                          4.7115927 + 0.13603681398218053 * 4 - np.pi
+                      ],
+                      [
+                          26.66902, -21.82302, -1.736057, 1.56, 3.48, 1.4,
+                          4.8315926 + 0.13603681398218053 * 4 - np.pi
+                      ],
+                      [
+                          31.31978, -8.162144, -1.6217787, 1.74, 3.77, 1.48,
+                          0.35159278 + 0.13603681398218053 * 4 - np.pi
+                      ]]), Box3DMode.LIDAR, Box3DMode.CAM)
     points = boxes.flip('horizontal', points)
     expected_points = torch.tensor([[-0.6762, 1.4658, 1.2559],
                                     [-0.8784, 1.3857, 4.7814],
                                     [0.2517, 0.9697, 6.7053],
                                     [-0.5520, 0.5265, 0.6533],
                                     [0.5358, 1.4741, 4.5870]])
-    assert torch.allclose(boxes.tensor, expected_tensor)
+    yaw_normalized_tensor = boxes.tensor.clone()
+    yaw_normalized_tensor[:, -1:] = limit_period(
+        yaw_normalized_tensor[:, -1:], period=np.pi * 2)
+    assert torch.allclose(yaw_normalized_tensor, expected_tensor, 1e-3)
     assert torch.allclose(points, expected_points, 1e-3)
     expected_tensor = torch.tensor(
-        [[2.5162, 1.7501, -1.7802, 3.3900, 1.6500, 1.7500, -1.6616],
-         [2.4567, 1.6357, -8.9594, 4.0100, 1.5700, 1.5400, -1.5216],
-         [-0.5558, 1.3033, -28.2967, 2.2300, 1.4800, 1.4700, -4.7116],
-         [21.8230, 1.7361, -26.6690, 3.4800, 1.4000, 1.5600, -4.8316],
-         [8.1621, 1.6218, -31.3198, 3.7700, 1.4800, 1.7400, -0.3516]])
+        [[
+            2.5162, 1.7501, -1.7802, 1.7500, 1.6500, 3.3900,
+            1.6616 + 0.13603681398218053 * 4 - np.pi / 2
+        ],
+         [
+             2.4567, 1.6357, -8.9594, 1.5400, 1.5700, 4.0100,
+             1.5216 + 0.13603681398218053 * 4 - np.pi / 2
+         ],
+         [
+             -0.5558, 1.3033, -28.2967, 1.4700, 1.4800, 2.2300,
+             4.7116 + 0.13603681398218053 * 4 - np.pi / 2
+         ],
+         [
+             21.8230, 1.7361, -26.6690, 1.5600, 1.4000, 3.4800,
+             4.8316 + 0.13603681398218053 * 4 - np.pi / 2
+         ],
+         [
+             8.1621, 1.6218, -31.3198, 1.7400, 1.4800, 3.7700,
+             0.3516 + 0.13603681398218053 * 4 - np.pi / 2
+         ]])
     boxes_flip_vert = boxes.clone()
     points = boxes_flip_vert.flip('vertical', points)
     expected_points = torch.tensor([[-0.6762, 1.4658, -1.2559],
@@ -719,19 +836,38 @@ def test_camera_boxes3d():
                                     [0.2517, 0.9697, -6.7053],
                                     [-0.5520, 0.5265, -0.6533],
                                     [0.5358, 1.4741, -4.5870]])
-    assert torch.allclose(boxes_flip_vert.tensor, expected_tensor, 1e-4)
+    yaw_normalized_tensor = boxes_flip_vert.tensor.clone()
+    yaw_normalized_tensor[:, -1:] = limit_period(
+        yaw_normalized_tensor[:, -1:], period=np.pi * 2)
+    expected_tensor[:, -1:] = limit_period(
+        expected_tensor[:, -1:], period=np.pi * 2)
+    assert torch.allclose(yaw_normalized_tensor, expected_tensor, 1e-4)
     assert torch.allclose(points, expected_points)
# test box rotation # test box rotation
# with input torch.Tensor points and angle # with input torch.Tensor points and angle
expected_tensor = Box3DMode.convert( expected_tensor = Box3DMode.convert(
torch.tensor( torch.tensor([[
[[1.4225, -2.7344, -1.7501, 1.7500, 3.3900, 1.6500, 1.7976], 1.4225, -2.7344, -1.7501, 1.7500, 3.3900, 1.6500,
[8.5435, -3.6491, -1.6357, 1.5400, 4.0100, 1.5700, 1.6576], 1.7976 + 0.13603681398218053 * 2 - np.pi
[28.1106, -3.2869, -1.3033, 1.4700, 2.2300, 1.4800, 4.8476], ],
[23.4630, -25.2382, -1.7361, 1.5600, 3.4800, 1.4000, 4.9676], [
[29.9235, -12.3342, -1.6218, 1.7400, 3.7700, 1.4800, 0.4876]]), 8.5435, -3.6491, -1.6357, 1.5400, 4.0100, 1.5700,
Box3DMode.LIDAR, Box3DMode.CAM) 1.6576 + 0.13603681398218053 * 2 - np.pi
],
[
28.1106, -3.2869, -1.3033, 1.4700, 2.2300, 1.4800,
4.8476 + 0.13603681398218053 * 2 - np.pi
],
[
23.4630, -25.2382, -1.7361, 1.5600, 3.4800, 1.4000,
4.9676 + 0.13603681398218053 * 2 - np.pi
],
[
29.9235, -12.3342, -1.6218, 1.7400, 3.7700, 1.4800,
0.4876 + 0.13603681398218053 * 2 - np.pi
]]), Box3DMode.LIDAR, Box3DMode.CAM)
points, rot_mat_T = boxes.rotate(torch.tensor(0.13603681398218053), points) points, rot_mat_T = boxes.rotate(torch.tensor(0.13603681398218053), points)
expected_points = torch.tensor([[-0.8403, 1.4658, -1.1526], expected_points = torch.tensor([[-0.8403, 1.4658, -1.1526],
[-1.5187, 1.3857, -4.6181], [-1.5187, 1.3857, -4.6181],
                                    [-0.6600, 0.9697, -6.6775],
                                    [-0.6355, 0.5265, -0.5724],
                                    [-0.0912, 1.4741, -4.6173]])
    expected_rot_mat_T = torch.tensor([[0.9908, 0.0000, -0.1356],
                                       [0.0000, 1.0000, 0.0000],
                                       [0.1356, 0.0000, 0.9908]])
    yaw_normalized_tensor = boxes.tensor.clone()
    yaw_normalized_tensor[:, -1:] = limit_period(
        yaw_normalized_tensor[:, -1:], period=np.pi * 2)
    expected_tensor[:, -1:] = limit_period(
        expected_tensor[:, -1:], period=np.pi * 2)
    assert torch.allclose(yaw_normalized_tensor, expected_tensor, 1e-3)
    assert torch.allclose(points, expected_points, 1e-3)
    assert torch.allclose(rot_mat_T, expected_rot_mat_T, 1e-3)
@@ -751,7 +892,10 @@ def test_camera_boxes3d():
    rot_mat = np.array([[0.99076125, 0., -0.13561762], [0., 1., 0.],
                        [0.13561762, 0., 0.99076125]])
    points, rot_mat_T = boxes.rotate(rot_mat, points)
    yaw_normalized_tensor = boxes.tensor.clone()
    yaw_normalized_tensor[:, -1:] = limit_period(
        yaw_normalized_tensor[:, -1:], period=np.pi * 2)
    assert torch.allclose(yaw_normalized_tensor, expected_tensor, 1e-3)
    assert torch.allclose(points, expected_points, 1e-3)
    assert torch.allclose(rot_mat_T, expected_rot_mat_T, 1e-3)
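The normalize-then-compare pattern above recurs throughout this file after the refactor; a minimal sketch of a helper it could be factored into (hypothetical name, assuming only the np, torch and limit_period already used here):

    def assert_yaw_normalized_close(boxes_tensor, expected, rtol=1e-3):
        # wrap both yaw columns into the same period before comparing
        actual = boxes_tensor.clone()
        actual[:, -1:] = limit_period(actual[:, -1:], period=np.pi * 2)
        expected = expected.clone()
        expected[:, -1:] = limit_period(expected[:, -1:], period=np.pi * 2)
        assert torch.allclose(actual, expected, rtol)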
@@ -788,51 +932,61 @@ def test_camera_boxes3d():
    expected_tensor = Box3DMode.convert(
        torch.tensor([[
            1.0443488, -2.9183323, -1.7599131, 1.7597977, 3.4089797,
            1.6592377, 1.9336663 - np.pi
        ], [
            8.014273, -4.8007393, -1.6448704, 1.5486219, 4.0324507,
            1.57879, 1.7936664 - np.pi
        ], [
            27.558605, -7.1084175, -1.310622, 1.4782301, 2.242485,
            1.488286, 4.9836664 - np.pi
        ], [
            19.934517, -28.344835, -1.7457767, 1.5687338, 3.4994833,
            1.4078381, 5.1036663 - np.pi
        ], [
            28.130915, -16.369587, -1.6308585, 1.7497417, 3.791107,
            1.488286, 0.6236664 - np.pi
        ]]), Box3DMode.LIDAR, Box3DMode.CAM)
    boxes.scale(1.00559866335275)
    yaw_normalized_tensor = boxes.tensor.clone()
    yaw_normalized_tensor[:, -1:] = limit_period(
        yaw_normalized_tensor[:, -1:], period=np.pi * 2)
    expected_tensor[:, -1:] = limit_period(
        expected_tensor[:, -1:], period=np.pi * 2)
    assert torch.allclose(yaw_normalized_tensor, expected_tensor)

    # test box translation
    expected_tensor = Box3DMode.convert(
        torch.tensor([[
            1.1281544, -3.0507944, -1.9169292, 1.7597977, 3.4089797,
            1.6592377, 1.9336663 - np.pi
        ], [
            8.098079, -4.9332013, -1.8018866, 1.5486219, 4.0324507,
            1.57879, 1.7936664 - np.pi
        ], [
            27.64241, -7.2408795, -1.4676381, 1.4782301, 2.242485,
            1.488286, 4.9836664 - np.pi
        ], [
            20.018322, -28.477297, -1.9027928, 1.5687338, 3.4994833,
            1.4078381, 5.1036663 - np.pi
        ], [
            28.21472, -16.502048, -1.7878747, 1.7497417, 3.791107,
            1.488286, 0.6236664 - np.pi
        ]]), Box3DMode.LIDAR, Box3DMode.CAM)
    boxes.translate(torch.tensor([0.13246193, 0.15701613, 0.0838056]))
    yaw_normalized_tensor = boxes.tensor.clone()
    yaw_normalized_tensor[:, -1:] = limit_period(
        yaw_normalized_tensor[:, -1:], period=np.pi * 2)
    expected_tensor[:, -1:] = limit_period(
        expected_tensor[:, -1:], period=np.pi * 2)
    assert torch.allclose(yaw_normalized_tensor, expected_tensor)
    # test bbox in_range_bev
    expected_tensor = torch.tensor([1, 1, 1, 1, 1], dtype=torch.bool)
@@ -858,13 +1012,13 @@ def test_camera_boxes3d():
    assert (boxes.tensor[:, 6] >= -np.pi / 2).all()

    Box3DMode.convert(boxes, Box3DMode.LIDAR, Box3DMode.LIDAR)
    expected_tensor = boxes.tensor.clone()
    assert torch.allclose(expected_tensor, boxes.tensor)

    boxes.flip()
    boxes.flip()
    boxes.limit_yaw()
    assert torch.allclose(expected_tensor, boxes.tensor)

    # test nearest_bev
    # BEV box in lidar coordinates (x, y)
@@ -878,54 +1032,66 @@ def test_camera_boxes3d():
    expected_tensor = lidar_expected_tensor.clone()
    expected_tensor[:, 0::2] = -lidar_expected_tensor[:, [3, 1]]
    expected_tensor[:, 1::2] = lidar_expected_tensor[:, 0::2]
    # the pytorch print loses some precision
    assert torch.allclose(
        boxes.nearest_bev, expected_tensor, rtol=1e-4, atol=1e-7)
    expected_tensor = torch.tensor([[[2.8332e+00, 2.5769e-01, -7.7767e-01],
                                     [1.6232e+00, 2.5769e-01, 2.4093e+00],
                                     [1.6232e+00, 1.9169e+00, 2.4093e+00],
                                     [2.8332e+00, 1.9169e+00, -7.7767e-01],
                                     [4.4784e+00, 2.5769e-01, -1.5302e-01],
                                     [3.2684e+00, 2.5769e-01, 3.0340e+00],
                                     [3.2684e+00, 1.9169e+00, 3.0340e+00],
                                     [4.4784e+00, 1.9169e+00, -1.5302e-01]],
                                    [[4.6237e+00, 2.2310e-01, 5.9606e+00],
                                     [3.7324e+00, 2.2310e-01, 9.8933e+00],
                                     [3.7324e+00, 1.8019e+00, 9.8933e+00],
                                     [4.6237e+00, 1.8019e+00, 5.9606e+00],
                                     [6.1340e+00, 2.2310e-01, 6.3029e+00],
                                     [5.2427e+00, 2.2310e-01, 1.0236e+01],
                                     [5.2427e+00, 1.8019e+00, 1.0236e+01],
                                     [6.1340e+00, 1.8019e+00, 6.3029e+00]],
                                    [[6.8292e+00, -2.0648e-02, 2.6364e+01],
                                     [6.2283e+00, -2.0648e-02, 2.8525e+01],
                                     [6.2283e+00, 1.4676e+00, 2.8525e+01],
                                     [6.8292e+00, 1.4676e+00, 2.6364e+01],
                                     [8.2534e+00, -2.0648e-02, 2.6760e+01],
                                     [7.6525e+00, -2.0648e-02, 2.8921e+01],
                                     [7.6525e+00, 1.4676e+00, 2.8921e+01],
                                     [8.2534e+00, 1.4676e+00, 2.6760e+01]],
                                    [[2.8420e+01, 4.9495e-01, 1.8102e+01],
                                     [2.7085e+01, 4.9495e-01, 2.1337e+01],
                                     [2.7085e+01, 1.9028e+00, 2.1337e+01],
                                     [2.8420e+01, 1.9028e+00, 1.8102e+01],
                                     [2.9870e+01, 4.9495e-01, 1.8700e+01],
                                     [2.8535e+01, 4.9495e-01, 2.1935e+01],
                                     [2.8535e+01, 1.9028e+00, 2.1935e+01],
                                     [2.9870e+01, 1.9028e+00, 1.8700e+01]],
                                    [[1.4452e+01, 2.9959e-01, 2.7818e+01],
                                     [1.7530e+01, 2.9959e-01, 3.0032e+01],
                                     [1.7530e+01, 1.7879e+00, 3.0032e+01],
                                     [1.4452e+01, 1.7879e+00, 2.7818e+01],
                                     [1.5474e+01, 2.9959e-01, 2.6398e+01],
                                     [1.8552e+01, 2.9959e-01, 2.8612e+01],
                                     [1.8552e+01, 1.7879e+00, 2.8612e+01],
                                     [1.5474e+01, 1.7879e+00, 2.6398e+01]]])
    assert torch.allclose(boxes.corners, expected_tensor, rtol=1e-3, atol=1e-4)

    th_boxes = torch.tensor(
        [[
            28.29669987, -0.5557558, -1.30332506, 1.47000003, 2.23000002,
            1.48000002, -1.57000005
        ], [
            26.66901946, 21.82302134, -1.73605708, 1.55999994, 3.48000002,
            1.39999998, -1.69000006
        ], [
            31.31977974, 8.16214412, -1.62177875, 1.74000001, 3.76999998,
            1.48000002, 2.78999996
        ]],
        dtype=torch.float32)
    # test init with a given origin
    boxes_origin_given = CameraInstance3DBoxes(
@@ -948,17 +1114,17 @@ def test_boxes3d_overlaps():
    # Test LiDAR boxes 3D overlaps
    boxes1_tensor = torch.tensor(
        [[1.8, -2.5, -1.8, 1.75, 3.39, 1.65, -1.6615927],
         [8.9, -2.5, -1.6, 1.54, 4.01, 1.57, -1.5215927],
         [28.3, 0.5, -1.3, 1.47, 2.23, 1.48, -4.7115927],
         [31.3, -8.2, -1.6, 1.74, 3.77, 1.48, -0.35]],
        device='cuda')
    boxes1 = LiDARInstance3DBoxes(boxes1_tensor)

    boxes2_tensor = torch.tensor([[1.2, -3.0, -1.9, 1.8, 3.4, 1.7, -1.9],
                                  [8.1, -2.9, -1.8, 1.5, 4.1, 1.6, -1.8],
                                  [31.3, -8.2, -1.6, 1.74, 3.77, 1.48, -0.35],
                                  [20.1, -28.5, -1.9, 1.6, 3.5, 1.4, -5.1]],
                                 device='cuda')
    boxes2 = LiDARInstance3DBoxes(boxes2_tensor)
@@ -1101,6 +1267,7 @@ def test_depth_boxes3d():
         [-2.4016, -3.2521, 0.4426, 0.8234, 0.5325, 1.0099, -0.1215],
         [-2.5181, -2.5298, -0.4321, 0.8597, 0.6193, 1.0204, -0.0493],
         [-1.5434, -2.4951, -0.5570, 0.9385, 2.1404, 0.8954, -0.0585]])
    expected_tensor[:, -1:] -= 0.022998953275003075 * 2
    points, rot_mat_T = boxes_rot.rotate(-0.022998953275003075, points)
    expected_points = torch.tensor([[-0.7049, -1.2400, -1.4658, 2.5359],
                                    [-0.9881, -4.7599, -1.3857, 0.7167],
@@ -1115,10 +1282,13 @@ def test_depth_boxes3d():
    assert torch.allclose(rot_mat_T, expected_rot_mat_T, 1e-3)

    # with input torch.Tensor points and rotation matrix
    points, rot_mat_T = boxes.rotate(-0.022998953275003075, points)  # back
    rot_mat = np.array([[0.99973554, 0.02299693, 0.],
                        [-0.02299693, 0.99973554, 0.], [0., 0., 1.]])
    points, rot_mat_T = boxes.rotate(rot_mat, points)
    expected_rot_mat_T = torch.tensor([[0.99973554, 0.02299693, 0.0000],
                                       [-0.02299693, 0.99973554, 0.0000],
                                       [0.0000, 0.0000, 1.0000]])
    assert torch.allclose(boxes_rot.tensor, expected_tensor, 1e-3)
    assert torch.allclose(points, expected_points, 1e-3)
    assert torch.allclose(rot_mat_T, expected_rot_mat_T, 1e-3)
@@ -1135,27 +1305,64 @@ def test_depth_boxes3d():
                                       [-0.0974, 6.7093, -0.9697, 0.5599],
                                       [0.5669, 0.6404, -0.5265, 1.0032],
                                       [-0.4302, 4.5981, -1.4741, 0.0556]])
    expected_rot_mat_T_np = np.array([[0.99973554, -0.02299693, 0.0000],
                                      [0.02299693, 0.99973554, 0.0000],
                                      [0.0000, 0.0000, 1.0000]])
    expected_tensor = torch.tensor(
        [[-1.5434, -2.4951, -0.5570, 0.9385, 2.1404, 0.8954, -0.0585],
         [-2.4016, -3.2521, 0.4426, 0.8234, 0.5325, 1.0099, -0.1215],
         [-2.5181, -2.5298, -0.4321, 0.8597, 0.6193, 1.0204, -0.0493],
         [-1.5434, -2.4951, -0.5570, 0.9385, 2.1404, 0.8954, -0.0585]])
    expected_tensor[:, -1:] -= 0.022998953275003075 * 2
    assert torch.allclose(boxes.tensor, expected_tensor, 1e-3)
    assert np.allclose(points_np, expected_points_np, 1e-3)
    assert np.allclose(rot_mat_T_np, expected_rot_mat_T_np, 1e-3)

    # with input DepthPoints and rotation matrix
    points_np, rot_mat_T_np = boxes.rotate(-0.022998953275003075, points_np)
    depth_points = DepthPoints(points_np, points_dim=4)
    depth_points, rot_mat_T_np = boxes.rotate(rot_mat, depth_points)
    points_np = depth_points.tensor.numpy()
    expected_rot_mat_T_np = expected_rot_mat_T_np.T
    assert torch.allclose(boxes.tensor, expected_tensor, 1e-3)
    assert np.allclose(points_np, expected_points_np, 1e-3)
    assert np.allclose(rot_mat_T_np, expected_rot_mat_T_np, 1e-3)
    expected_tensor = torch.tensor([[[-2.1217, -3.5105, -0.5570],
                                     [-2.1217, -3.5105, 0.3384],
                                     [-1.8985, -1.3818, 0.3384],
                                     [-1.8985, -1.3818, -0.5570],
                                     [-1.1883, -3.6084, -0.5570],
                                     [-1.1883, -3.6084, 0.3384],
                                     [-0.9651, -1.4796, 0.3384],
                                     [-0.9651, -1.4796, -0.5570]],
                                    [[-2.8519, -3.4460, 0.4426],
                                     [-2.8519, -3.4460, 1.4525],
                                     [-2.7632, -2.9210, 1.4525],
                                     [-2.7632, -2.9210, 0.4426],
                                     [-2.0401, -3.5833, 0.4426],
                                     [-2.0401, -3.5833, 1.4525],
                                     [-1.9513, -3.0582, 1.4525],
                                     [-1.9513, -3.0582, 0.4426]],
                                    [[-2.9755, -2.7971, -0.4321],
                                     [-2.9755, -2.7971, 0.5883],
                                     [-2.9166, -2.1806, 0.5883],
                                     [-2.9166, -2.1806, -0.4321],
                                     [-2.1197, -2.8789, -0.4321],
                                     [-2.1197, -2.8789, 0.5883],
                                     [-2.0608, -2.2624, 0.5883],
                                     [-2.0608, -2.2624, -0.4321]],
                                    [[-2.1217, -3.5105, -0.5570],
                                     [-2.1217, -3.5105, 0.3384],
                                     [-1.8985, -1.3818, 0.3384],
                                     [-1.8985, -1.3818, -0.5570],
                                     [-1.1883, -3.6084, -0.5570],
                                     [-1.1883, -3.6084, 0.3384],
                                     [-0.9651, -1.4796, 0.3384],
                                     [-0.9651, -1.4796, -0.5570]]])
    assert torch.allclose(boxes.corners, expected_tensor, 1e-3)
    th_boxes = torch.tensor(
        [[0.61211395, 0.8129094, 0.10563634, 1.497534, 0.16927195, 0.27956772],
         [1.430009, 0.49797538, 0.9382923, 0.07694054, 0.9312509, 1.8919173]],
@@ -1198,11 +1405,11 @@ def test_depth_boxes3d():
                                     [1.5112, -0.0352, 2.8302],
                                     [1.5112, 0.8986, 2.8302],
                                     [1.5112, 0.8986, 0.9383]]])
    assert torch.allclose(boxes.corners, expected_tensor, 1e-3)

    # test points in boxes
    if torch.cuda.is_available():
        box_idxs_of_pts = boxes.points_in_boxes_batch(points.cuda())
        expected_idxs_of_pts = torch.tensor(
            [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0]],
            device='cuda:0',
@@ -1211,8 +1418,8 @@ def test_depth_boxes3d():
    # test get_surface_line_center
    boxes = torch.tensor(
        [[0.3294, 1.0359, 0.1171, 1.0822, 1.1247, 1.3721, -0.4916],
         [-2.4630, -2.6324, -0.1616, 0.9202, 1.7896, 0.1992, -0.3185]])
    boxes = DepthInstance3DBoxes(
        boxes, box_dim=boxes.shape[-1], with_yaw=True, origin=(0.5, 0.5, 0.5))
    surface_center, line_center = boxes.get_surface_line_center()
@@ -1260,22 +1467,97 @@ def test_depth_boxes3d():
def test_rotation_3d_in_axis():
    # # clockwise
    # points = torch.tensor([[[-0.4599, -0.0471, 0.0000],
    #                         [-0.4599, -0.0471, 1.8433],
    #                         [-0.4599, 0.0471, 1.8433]],
    #                        [[-0.2555, -0.2683, 0.0000],
    #                         [-0.2555, -0.2683, 0.9072],
    #                         [-0.2555, 0.2683, 0.9072]]])
    # rotated = rotation_3d_in_axis(
    #     points, torch.tensor([-np.pi / 10, np.pi / 10]),
    #     axis=0, clockwise=True)
    # expected_rotated = torch.tensor([[[0.0000, -0.4228, -0.1869],
    #                                   [1.8433, -0.4228, -0.1869],
    #                                   [1.8433, -0.4519, -0.0973]],
    #                                  [[0.0000, -0.3259, -0.1762],
    #                                   [0.9072, -0.3259, -0.1762],
    #                                   [0.9072, -0.1601, 0.3341]]])
    # assert torch.allclose(rotated, expected_rotated, 1e-3)

    # anti-clockwise with return rotation mat
    points = torch.tensor([[[-0.4599, -0.0471, 0.0000],
                            [-0.4599, -0.0471, 1.8433]]])
    rotated = rotation_3d_in_axis(points, torch.tensor([np.pi / 2]), axis=0)
    expected_rotated = torch.tensor([[[-0.4599, 0.0000, -0.0471],
                                      [-0.4599, -1.8433, -0.0471]]])
    assert torch.allclose(rotated, expected_rotated, 1e-3)
    points = torch.tensor([[[-0.4599, -0.0471, 0.0000],
                            [-0.4599, -0.0471, 1.8433]]])
    rotated, mat = rotation_3d_in_axis(
        points, torch.tensor([np.pi / 2]), axis=0, return_mat=True)
    expected_rotated = torch.tensor([[[-0.4599, 0.0000, -0.0471],
                                      [-0.4599, -1.8433, -0.0471]]])
    expected_mat = torch.tensor([[[1, 0, 0], [0, 0, 1], [0, -1, 0]]]).float()
    assert torch.allclose(rotated, expected_rotated, atol=1e-6)
    assert torch.allclose(mat, expected_mat, atol=1e-6)

    points = torch.tensor([[[-0.4599, -0.0471, 0.0000],
                            [-0.4599, -0.0471, 1.8433]],
                           [[-0.2555, -0.2683, 0.0000],
                            [-0.2555, -0.2683, 0.9072]]])
    rotated = rotation_3d_in_axis(points, np.pi / 2, axis=0)
    expected_rotated = torch.tensor([[[-0.4599, 0.0000, -0.0471],
                                      [-0.4599, -1.8433, -0.0471]],
                                     [[-0.2555, 0.0000, -0.2683],
                                      [-0.2555, -0.9072, -0.2683]]])
    assert torch.allclose(rotated, expected_rotated, atol=1e-3)

    points = np.array([[[-0.4599, -0.0471, 0.0000],
                        [-0.4599, -0.0471, 1.8433]],
                       [[-0.2555, -0.2683, 0.0000],
                        [-0.2555, -0.2683, 0.9072]]]).astype(np.float32)
    rotated = rotation_3d_in_axis(points, np.pi / 2, axis=0)
    expected_rotated = np.array([[[-0.4599, 0.0000, -0.0471],
                                  [-0.4599, -1.8433, -0.0471]],
                                 [[-0.2555, 0.0000, -0.2683],
                                  [-0.2555, -0.9072, -0.2683]]])
    assert np.allclose(rotated, expected_rotated, atol=1e-3)

    points = torch.tensor([[[-0.4599, -0.0471, 0.0000],
                            [-0.4599, -0.0471, 1.8433]],
                           [[-0.2555, -0.2683, 0.0000],
                            [-0.2555, -0.2683, 0.9072]]])
    angles = [np.pi / 2, -np.pi / 2]
    rotated = rotation_3d_in_axis(points, angles, axis=0)
    expected_rotated = np.array([[[-0.4599, 0.0000, -0.0471],
                                  [-0.4599, -1.8433, -0.0471]],
                                 [[-0.2555, 0.0000, 0.2683],
                                  [-0.2555, 0.9072, 0.2683]]])
    assert np.allclose(rotated, expected_rotated, atol=1e-3)

    points = torch.tensor([[[-0.0471, 0.0000], [-0.0471, 1.8433]],
                           [[-0.2683, 0.0000], [-0.2683, 0.9072]]])
    angles = [np.pi / 2, -np.pi / 2]
    rotated = rotation_3d_in_axis(points, angles)
    expected_rotated = np.array([[[0.0000, -0.0471], [-1.8433, -0.0471]],
                                 [[0.0000, 0.2683], [0.9072, 0.2683]]])
    assert np.allclose(rotated, expected_rotated, atol=1e-3)
def test_rotation_2d():
    angles = np.array([3.14])
    corners = np.array([[[-0.235, -0.49], [-0.235, 0.49], [0.235, 0.49],
                         [0.235, -0.49]]])
    corners_rotated = rotation_3d_in_axis(corners, angles)
    expected_corners = np.array([[[0.2357801, 0.48962511],
                                  [0.2342193, -0.49037365],
                                  [-0.2357801, -0.48962511],
                                  [-0.2342193, 0.49037365]]])
    assert np.allclose(corners_rotated, expected_corners)
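As a cross-check on the counter-clockwise convention these expectations encode, the z-axis case can be written out by hand; a sketch assuming nothing beyond numpy (rotate_z_ccw is an illustrative name, not a library function):

    def rotate_z_ccw(points, angle):
        # points: (N, 3); right-multiply by the transposed rotation matrix
        rot_cos, rot_sin = np.cos(angle), np.sin(angle)
        rot_mat_T = np.array([[rot_cos, rot_sin, 0.],
                              [-rot_sin, rot_cos, 0.],
                              [0., 0., 1.]])
        return points @ rot_mat_T

    corner = np.array([[-0.235, -0.49, 0.]])
    assert np.allclose(
        rotate_z_ccw(corner, 3.14)[0, :2], [0.2357801, 0.48962511], atol=1e-6)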
def test_limit_period():
    torch.manual_seed(0)
@@ -1285,6 +1567,11 @@ def test_limit_period():
                                    [0.3074]])
    assert torch.allclose(result, expected_result, 1e-3)

    val = val.numpy()
    result = limit_period(val)
    expected_result = expected_result.numpy()
    assert np.allclose(result, expected_result, 1e-3)
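For reference, the wrapping semantics exercised here fit in one line (a sketch of the observed behaviour, not the library source); with the default offset of 0.5 and period=np.pi * 2 it maps angles into [-pi, pi):

    def limit_period_ref(val, offset=0.5, period=np.pi):
        # shift val into [-offset * period, (1 - offset) * period)
        return val - np.floor(val / period + offset) * period

    assert np.isclose(
        limit_period_ref(1.5 * np.pi, period=2 * np.pi), -0.5 * np.pi)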
def test_xywhr2xyxyr():
    torch.manual_seed(0)
@@ -1324,3 +1611,14 @@ def test_points_cam2img():
                                         [0.6994, 0.7782], [0.5623, 0.6303],
                                         [0.4359, 0.6532]])
    assert torch.allclose(point_2d_res, expected_point_2d_res, 1e-3)

    points = points.numpy()
    proj_mat = proj_mat.numpy()
    point_2d_res = points_cam2img(points, proj_mat)
    expected_point_2d_res = expected_point_2d_res.numpy()
    assert np.allclose(point_2d_res, expected_point_2d_res, 1e-3)

    points = torch.from_numpy(points)
    point_2d_res = points_cam2img(points, proj_mat)
    expected_point_2d_res = torch.from_numpy(expected_point_2d_res)
    assert torch.allclose(point_2d_res, expected_point_2d_res, 1e-3)
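The projection verified above amounts to a homogeneous multiply followed by perspective division; a numpy sketch, assuming an (N, 3) point array and a 4x4 projection matrix (points_cam2img_ref is an illustrative name):

    def points_cam2img_ref(points_3d, proj_mat):
        # append homogeneous ones, project, then divide by depth
        pts_hom = np.concatenate(
            [points_3d, np.ones((points_3d.shape[0], 1))], axis=1)
        pts_2d = pts_hom @ proj_mat.T
        return pts_2d[:, :2] / pts_2d[:, 2:3]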
@@ -20,7 +20,7 @@ def test_camera_to_lidar():
def test_box_camera_to_lidar():
    from mmdet3d.core.bbox.box_np_ops import box_camera_to_lidar
    box = np.array([[1.84, 1.47, 8.41, 1.2, 1.89, 0.48, -0.01]])
    rect = np.array([[0.9999128, 0.01009263, -0.00851193, 0.],
                     [-0.01012729, 0.9999406, -0.00403767, 0.],
                     [0.00847068, 0.00412352, 0.9999556, 0.], [0., 0., 0.,
@@ -30,8 +30,9 @@ def test_box_camera_to_lidar():
                      [0.9999753, 0.00693114, -0.0011439, -0.3321029],
                      [0., 0., 0., 1.]])
    box_lidar = box_camera_to_lidar(box, rect, Trv2c)
    expected_box = np.array([[
        8.73138192, -1.85591746, -1.59969933, 1.2, 0.48, 1.89,
        0.01 - np.pi / 2
    ]])
    assert np.allclose(box_lidar, expected_box)
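Reading off the numbers in this expectation (rect and Trv2c are close to identity): camera (x, y, z) maps roughly to lidar (z, -x, -y), the dx/dy sizes swap, and the refactored yaw becomes -yaw_cam - np.pi / 2, which is where the 0.01 - np.pi / 2 term above comes from:

    cam_yaw = -0.01
    lidar_yaw = -cam_yaw - np.pi / 2  # == 0.01 - np.pi / 2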
@@ -48,22 +49,17 @@ def test_center_to_corner_box2d():
    from mmdet3d.core.bbox.box_np_ops import center_to_corner_box2d
    center = np.array([[9.348705, -3.6271024]])
    dims = np.array([[0.47, 0.98]])
    angles = np.array([3.14])
    corner = center_to_corner_box2d(center, dims, angles)
    expected_corner = np.array([[[9.584485, -3.1374772],
                                 [9.582925, -4.117476],
                                 [9.112926, -4.1167274],
                                 [9.114486, -3.1367288]]])
    assert np.allclose(corner, expected_corner)
    center = np.array([[-0.0, 0.0]])
    dims = np.array([[4.0, 8.0]])
    angles = np.array([-0.785398])  # -45 degrees
    corner = center_to_corner_box2d(center, dims, angles)
    expected_corner = np.array([[[-4.24264, -1.41421], [1.41421, 4.24264],
                                 [4.24264, 1.41421], [-1.41421, -4.24264]]])
    assert np.allclose(corner, expected_corner)
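A quick hand check of the -45 degree case above: rotating the local corner (-dx / 2, -dy / 2) = (-2, -4) counter-clockwise by the negative angle reproduces the first expected corner.

    c, s = np.cos(-np.pi / 4), np.sin(-np.pi / 4)
    x, y = -2.0, -4.0
    assert np.allclose([x * c - y * s, x * s + y * c], [-4.24264, -1.41421],
                       atol=1e-4)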
@@ -3,7 +3,8 @@ import numpy as np
import torch

from mmdet3d.core.bbox import (CameraInstance3DBoxes, Coord3DMode,
                               DepthInstance3DBoxes, LiDARInstance3DBoxes,
                               limit_period)
from mmdet3d.core.points import CameraPoints, DepthPoints, LiDARPoints
@@ -242,22 +243,31 @@ def test_boxes_conversion():
    convert_lidar_boxes = Coord3DMode.convert(cam_boxes, Coord3DMode.CAM,
                                              Coord3DMode.LIDAR)
    expected_tensor = torch.tensor([[
        -1.7501, -1.7802, -2.5162, 1.7500, 1.6500, 3.3900,
        -1.4800 - np.pi / 2
    ], [
        -1.6357, -8.9594, -2.4567, 1.5400, 1.5700, 4.0100,
        -1.6200 - np.pi / 2
    ], [
        -1.3033, -28.2967, 0.5558, 1.4700, 1.4800, 2.2300,
        1.5700 - np.pi / 2
    ], [
        -1.7361, -26.6690, -21.8230, 1.5600, 1.4000, 3.4800,
        1.6900 - np.pi / 2
    ], [
        -1.6218, -31.3198, -8.1621, 1.7400, 1.4800, 3.7700,
        -2.7900 - np.pi / 2
    ]])
    expected_tensor[:, -1:] = limit_period(
        expected_tensor[:, -1:], period=np.pi * 2)
    assert torch.allclose(expected_tensor, convert_lidar_boxes.tensor, 1e-3)

    convert_depth_boxes = Coord3DMode.convert(cam_boxes, Coord3DMode.CAM,
                                              Coord3DMode.DEPTH)
    expected_tensor = torch.tensor(
        [[1.7802, 1.7501, 2.5162, 1.7500, 1.6500, 3.3900, -1.4800],
         [8.9594, 1.6357, 2.4567, 1.5400, 1.5700, 4.0100, -1.6200],
         [28.2967, 1.3033, -0.5558, 1.4700, 1.4800, 2.2300, 1.5700],
         [26.6690, 1.7361, 21.8230, 1.5600, 1.4000, 3.4800, 1.6900],
         [31.3198, 1.6218, 8.1621, 1.7400, 1.4800, 3.7700, -2.7900]])
    assert torch.allclose(expected_tensor, convert_depth_boxes.tensor, 1e-3)
    # test LIDAR to CAM and DEPTH
@@ -269,22 +279,42 @@ def test_boxes_conversion():
        [31.31978, 8.162144, -1.6217787, 1.74, 3.77, 1.48, 2.79]])
    convert_cam_boxes = Coord3DMode.convert(lidar_boxes, Coord3DMode.LIDAR,
                                            Coord3DMode.CAM)
    expected_tensor = torch.tensor([[
        -2.5162, 1.7501, 1.7802, 1.7500, 1.6500, 3.3900,
        -1.4800 - np.pi / 2
    ], [
        -2.4567, 1.6357, 8.9594, 1.5400, 1.5700, 4.0100,
        -1.6200 - np.pi / 2
    ], [
        0.5558, 1.3033, 28.2967, 1.4700, 1.4800, 2.2300,
        1.5700 - np.pi / 2
    ], [
        -21.8230, 1.7361, 26.6690, 1.5600, 1.4000, 3.4800,
        1.6900 - np.pi / 2
    ], [
        -8.1621, 1.6218, 31.3198, 1.7400, 1.4800, 3.7700,
        -2.7900 - np.pi / 2
    ]])
    expected_tensor[:, -1:] = limit_period(
        expected_tensor[:, -1:], period=np.pi * 2)
    assert torch.allclose(expected_tensor, convert_cam_boxes.tensor, 1e-3)
    convert_depth_boxes = Coord3DMode.convert(lidar_boxes, Coord3DMode.LIDAR,
                                              Coord3DMode.DEPTH)
    expected_tensor = torch.tensor([[
        -2.5162, 1.7802, -1.7501, 1.7500, 3.3900, 1.6500,
        1.4800 + np.pi / 2
    ], [
        -2.4567, 8.9594, -1.6357, 1.5400, 4.0100, 1.5700,
        1.6200 + np.pi / 2
    ], [
        0.5558, 28.2967, -1.3033, 1.4700, 2.2300, 1.4800,
        -1.5700 + np.pi / 2
    ], [
        -21.8230, 26.6690, -1.7361, 1.5600, 3.4800, 1.4000,
        -1.6900 + np.pi / 2
    ], [
        -8.1621, 31.3198, -1.6218, 1.7400, 3.7700, 1.4800,
        2.7900 + np.pi / 2
    ]])
    expected_tensor[:, -1:] = limit_period(
        expected_tensor[:, -1:], period=np.pi * 2)
    assert torch.allclose(expected_tensor, convert_depth_boxes.tensor, 1e-3)
    # test DEPTH to CAM and LIDAR
@@ -297,19 +327,25 @@ def test_boxes_conversion():
    convert_cam_boxes = Coord3DMode.convert(depth_boxes, Coord3DMode.DEPTH,
                                            Coord3DMode.CAM)
    expected_tensor = torch.tensor(
        [[1.7802, -1.7501, -2.5162, 1.7500, 1.6500, 3.3900, -1.4800],
         [8.9594, -1.6357, -2.4567, 1.5400, 1.5700, 4.0100, -1.6200],
         [28.2967, -1.3033, 0.5558, 1.4700, 1.4800, 2.2300, 1.5700],
         [26.6690, -1.7361, -21.8230, 1.5600, 1.4000, 3.4800, 1.6900],
         [31.3198, -1.6218, -8.1621, 1.7400, 1.4800, 3.7700, -2.7900]])
    assert torch.allclose(expected_tensor, convert_cam_boxes.tensor, 1e-3)

    convert_lidar_boxes = Coord3DMode.convert(depth_boxes, Coord3DMode.DEPTH,
                                              Coord3DMode.LIDAR)
    expected_tensor = torch.tensor([[
        2.5162, -1.7802, -1.7501, 1.7500, 3.3900, 1.6500,
        1.4800 - np.pi / 2
    ], [
        2.4567, -8.9594, -1.6357, 1.5400, 4.0100, 1.5700,
        1.6200 - np.pi / 2
    ], [
        -0.5558, -28.2967, -1.3033, 1.4700, 2.2300, 1.4800,
        -1.5700 - np.pi / 2
    ], [
        21.8230, -26.6690, -1.7361, 1.5600, 3.4800, 1.4000,
        -1.6900 - np.pi / 2
    ], [
        8.1621, -31.3198, -1.6218, 1.7400, 3.7700, 1.4800,
        2.7900 - np.pi / 2
    ]])
    expected_tensor[:, -1:] = limit_period(
        expected_tensor[:, -1:], period=np.pi * 2)
    assert torch.allclose(expected_tensor, convert_lidar_boxes.tensor, 1e-3)
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch

from mmdet3d.core import array_converter, draw_heatmap_gaussian


def test_gaussian():
@@ -10,3 +12,169 @@ def test_gaussian():
    radius = 2
    draw_heatmap_gaussian(heatmap, ct_int, radius)
    assert torch.isclose(torch.sum(heatmap), torch.tensor(4.3505), atol=1e-3)
def test_array_converter():
    # to torch
    @array_converter(to_torch=True, apply_to=('array_a', 'array_b'))
    def test_func_1(array_a, array_b, container):
        container.append(array_a)
        container.append(array_b)
        return array_a.clone(), array_b.clone()

    np_array_a = np.array([0.0])
    np_array_b = np.array([0.0])
    container = []
    new_array_a, new_array_b = test_func_1(np_array_a, np_array_b, container)

    assert isinstance(new_array_a, np.ndarray)
    assert isinstance(new_array_b, np.ndarray)
    assert isinstance(container[0], torch.Tensor)
    assert isinstance(container[1], torch.Tensor)

    # one to torch and one not
    @array_converter(to_torch=True, apply_to=('array_a', ))
    def test_func_2(array_a, array_b):
        return torch.cat([array_a, array_b])

    with pytest.raises(TypeError):
        _ = test_func_2(np_array_a, np_array_b)

    # wrong template_arg_name_
    @array_converter(
        to_torch=True, apply_to=('array_a', ), template_arg_name_='array_c')
    def test_func_3(array_a, array_b):
        return torch.cat([array_a, array_b])

    with pytest.raises(ValueError):
        _ = test_func_3(np_array_a, np_array_b)

    # wrong apply_to
    @array_converter(to_torch=True, apply_to=('array_a', 'array_c'))
    def test_func_4(array_a, array_b):
        return torch.cat([array_a, array_b])

    with pytest.raises(ValueError):
        _ = test_func_4(np_array_a, np_array_b)

    # to numpy
    @array_converter(to_torch=False, apply_to=('array_a', 'array_b'))
    def test_func_5(array_a, array_b, container):
        container.append(array_a)
        container.append(array_b)
        return array_a.copy(), array_b.copy()

    pt_array_a = torch.tensor([0.0])
    pt_array_b = torch.tensor([0.0])
    container = []
    new_array_a, new_array_b = test_func_5(pt_array_a, pt_array_b, container)

    assert isinstance(container[0], np.ndarray)
    assert isinstance(container[1], np.ndarray)
    assert isinstance(new_array_a, torch.Tensor)
    assert isinstance(new_array_b, torch.Tensor)

    # apply_to = None
    @array_converter(to_torch=False)
    def test_func_6(array_a, array_b, container):
        container.append(array_a)
        container.append(array_b)
        return array_a.clone(), array_b.clone()

    container = []
    new_array_a, new_array_b = test_func_6(pt_array_a, pt_array_b, container)

    assert isinstance(container[0], torch.Tensor)
    assert isinstance(container[1], torch.Tensor)
    assert isinstance(new_array_a, torch.Tensor)
    assert isinstance(new_array_b, torch.Tensor)

    # with default arg
    @array_converter(to_torch=True, apply_to=('array_a', 'array_b'))
    def test_func_7(array_a, container, array_b=np.array([2.])):
        container.append(array_a)
        container.append(array_b)
        return array_a.clone(), array_b.clone()

    container = []
    new_array_a, new_array_b = test_func_7(np_array_a, container)

    assert isinstance(container[0], torch.Tensor)
    assert isinstance(container[1], torch.Tensor)
    assert isinstance(new_array_a, np.ndarray)
    assert isinstance(new_array_b, np.ndarray)
    assert np.allclose(new_array_b, np.array([2.]), 1e-3)

    # override default arg
    container = []
    new_array_a, new_array_b = test_func_7(np_array_a, container,
                                           np.array([4.]))

    assert isinstance(container[0], torch.Tensor)
    assert isinstance(container[1], torch.Tensor)
    assert isinstance(new_array_a, np.ndarray)
    assert np.allclose(new_array_b, np.array([4.]), 1e-3)

    # list arg
    @array_converter(to_torch=True, apply_to=('array_a', 'array_b'))
    def test_func_8(container, array_a, array_b=[2.]):
        container.append(array_a)
        container.append(array_b)
        return array_a.clone(), array_b.clone()

    container = []
    new_array_a, new_array_b = test_func_8(container, [3.])

    assert isinstance(container[0], torch.Tensor)
    assert isinstance(container[1], torch.Tensor)
    assert np.allclose(new_array_a, np.array([3.]), 1e-3)
    assert np.allclose(new_array_b, np.array([2.]), 1e-3)

    # number arg
    @array_converter(to_torch=True, apply_to=('array_a', 'array_b'))
    def test_func_9(container, array_a, array_b=1):
        container.append(array_a)
        container.append(array_b)
        return array_a.clone(), array_b.clone()

    container = []
    new_array_a, new_array_b = test_func_9(container, np_array_a)

    assert isinstance(container[0], torch.FloatTensor)
    assert isinstance(container[1], torch.FloatTensor)
    assert np.allclose(new_array_a, np_array_a, 1e-3)
    assert np.allclose(new_array_b, np.array(1.0), 1e-3)

    # feed kwargs
    container = []
    kwargs = {'array_a': [5.], 'array_b': [6.]}
    new_array_a, new_array_b = test_func_8(container, **kwargs)

    assert isinstance(container[0], torch.Tensor)
    assert isinstance(container[1], torch.Tensor)
    assert np.allclose(new_array_a, np.array([5.]), 1e-3)
    assert np.allclose(new_array_b, np.array([6.]), 1e-3)

    # feed args and kwargs
    container = []
    kwargs = {'array_b': [7.]}
    args = (container, [8.])
    new_array_a, new_array_b = test_func_8(*args, **kwargs)

    assert isinstance(container[0], torch.Tensor)
    assert isinstance(container[1], torch.Tensor)
    assert np.allclose(new_array_a, np.array([8.]), 1e-3)
    assert np.allclose(new_array_b, np.array([7.]), 1e-3)

    # wrong template arg type
    with pytest.raises(TypeError):
        new_array_a, new_array_b = test_func_9(container, 3 + 4j)

    with pytest.raises(TypeError):
        new_array_a, new_array_b = test_func_9(container, {})

    # invalid template arg list
    with pytest.raises(TypeError):
        new_array_a, new_array_b = test_func_9(container,
                                               [True, np.array([3.0])])
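Summarizing the contract these cases pin down (a sketch of the observed behaviour, not the decorator's implementation): arguments named in apply_to are converted before the wrapped function runs, and the results are converted back to match the type of the template argument.

    @array_converter(to_torch=True, apply_to=('pts', ))
    def double_pts(pts):
        return pts * 2  # the body always sees a torch.Tensor

    assert isinstance(double_pts(np.array([1.0, 2.0])), np.ndarray)
    assert isinstance(double_pts(torch.tensor([1.0, 2.0])), torch.Tensor)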
@@ -206,7 +206,7 @@ parser.add_argument(
    '--out-dir',
    type=str,
    default='./data/kitti',
    required=False,
    help='name of info pkl')
parser.add_argument('--extra-tag', type=str, default='kitti')
parser.add_argument(
@@ -5,7 +5,7 @@ from collections import OrderedDict
from nuscenes.utils.geometry_utils import view_points
from pathlib import Path

from mmdet3d.core.bbox import box_np_ops, points_cam2img
from .kitti_data_utils import get_kitti_image_info, get_waymo_image_info
from .nuscenes_converter import post_process_coords
@@ -471,7 +471,7 @@ def get_2d_boxes(info, occluded, mono3d=True):
        repro_rec['velo_cam3d'] = -1  # no velocity in KITTI

        center3d = np.array(loc).reshape([1, 3])
        center2d = points_cam2img(
            center3d, camera_intrinsic, with_depth=True)
        repro_rec['center2d'] = center2d.squeeze().tolist()
        # normalized center2D + depth
@@ -192,8 +192,10 @@ def _fill_trainval_infos(lyft,
                names[i] = LyftDataset.NameMapping[names[i]]
        names = np.array(names)
        # we need to convert box size to
        # the format of our lidar coordinate system
        # which is dx, dy, dz (corresponding to l, w, h)
        gt_boxes = np.concatenate([locs, dims[:, [1, 0, 2]], rots], axis=1)
        assert len(gt_boxes) == len(
            annotations), f'{len(gt_boxes)}, {len(annotations)}'
        info['gt_boxes'] = gt_boxes
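To make the reorder above concrete (hypothetical numbers, with the raw lyft dims assumed to be (w, l, h)):

    dims = np.array([[1.9, 4.5, 1.6]])  # (w, l, h)
    assert (dims[:, [1, 0, 2]] == np.array([[4.5, 1.9, 1.6]])).all()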
@@ -10,7 +10,7 @@ from pyquaternion import Quaternion
from shapely.geometry import MultiPoint, box
from typing import List, Tuple, Union

from mmdet3d.core.bbox import points_cam2img
from mmdet3d.datasets import NuScenesDataset

nus_categories = ('car', 'truck', 'trailer', 'bus', 'construction_vehicle',
@@ -249,8 +249,10 @@ def _fill_trainval_infos(nusc,
            if names[i] in NuScenesDataset.NameMapping:
                names[i] = NuScenesDataset.NameMapping[names[i]]
        names = np.array(names)
        # we need to convert box size to
        # the format of our lidar coordinate system
        # which is dx, dy, dz (corresponding to l, w, h)
        gt_boxes = np.concatenate([locs, dims[:, [1, 0, 2]], rots], axis=1)
        assert len(gt_boxes) == len(
            annotations), f'{len(gt_boxes)}, {len(annotations)}'
        info['gt_boxes'] = gt_boxes
@@ -42,18 +42,17 @@ class SUNRGBDInstance(object):
        self.ymax = data[2] + data[4]
        self.box2d = np.array([self.xmin, self.ymin, self.xmax, self.ymax])
        self.centroid = np.array([data[5], data[6], data[7]])
        # data[9] is dx (l), data[8] is dy (w), data[10] is dz (h)
        # in our depth coordinate system,
        # l corresponds to the size along the x axis
        self.size = np.array([data[9], data[8], data[10]]) * 2
        self.orientation = np.zeros((3, ))
        self.orientation[0] = data[11]
        self.orientation[1] = data[12]
        self.heading_angle = np.arctan2(self.orientation[1],
                                        self.orientation[0])
        self.box3d = np.concatenate(
            [self.centroid, self.size, self.heading_angle[None]])


class SUNRGBDData(object):