"torchvision/vscode:/vscode.git/clone" did not exist on "162267ca90a8a7d03a8d9249602d6c72b497656a"
Commit 2eebdc2d authored by Yezhen Cong, committed by Tai-Wang

[Refactor] Main code modification for coordinate system refactor (#677)

parent 26ab7ff2
......@@ -132,8 +132,12 @@ def test_object_noise():
input_dict = object_noise(input_dict)
points = input_dict['points']
gt_bboxes_3d = input_dict['gt_bboxes_3d'].tensor
expected_gt_bboxes_3d = torch.tensor(
[[9.1724, -1.7559, -1.3550, 0.4800, 1.2000, 1.8900, 0.0505]])
# coord sys refactor (lidar2cam)
expected_gt_bboxes_3d = torch.tensor([[
9.1724, -1.7559, -1.3550, 1.2000, 0.4800, 1.8900,
0.0505 - float(rots) * 2 - np.pi / 2
]])
repr_str = repr(object_noise)
expected_repr_str = 'ObjectNoise(num_try=100, ' \
'translation_std=[0.25, 0.25, 0.25], ' \
......@@ -522,11 +526,11 @@ def test_random_flip_3d():
[21.2334, -9.3607, -0.2588, 0.0000],
[21.2179, -9.4372, -0.2598, 0.0000]])
expected_gt_bboxes_3d = torch.tensor(
[[38.9229, -18.4417, -1.1459, 0.7100, 1.7600, 1.8600, 5.4068],
[12.7768, -0.5795, -2.2682, 0.5700, 0.9900, 1.7200, 5.6445],
[12.7557, -2.2996, -1.4869, 0.6100, 1.1100, 1.9000, 5.0806],
[10.6677, -0.8064, -1.5435, 0.7900, 0.9600, 1.7900, 2.0560],
[5.0903, -5.1004, -1.2694, 0.7100, 1.7000, 1.8300, 5.0552]])
[[38.9229, -18.4417, -1.1459, 0.7100, 1.7600, 1.8600, 2.2652],
[12.7768, -0.5795, -2.2682, 0.5700, 0.9900, 1.7200, 2.5029],
[12.7557, -2.2996, -1.4869, 0.6100, 1.1100, 1.9000, 1.9390],
[10.6677, -0.8064, -1.5435, 0.7900, 0.9600, 1.7900, -1.0856],
[5.0903, -5.1004, -1.2694, 0.7100, 1.7000, 1.8300, 1.9136]])
repr_str = repr(random_flip_3d)
expected_repr_str = 'RandomFlip3D(sync_2d=True,' \
' flip_ratio_bev_vertical=1.0)'
......
......@@ -316,10 +316,24 @@ def test_sunrgbd_pipeline():
[0.8636, 1.3511, 0.0504, 0.0304],
[0.8690, 1.3461, 0.1265, 0.1065],
[0.8668, 1.3434, 0.1216, 0.1017]])
    # Depth coordinate system update: only the yaw changes, since rotation in
    # depth is counter-clockwise while the yaw angle was originally clockwise.
    # The heading angles in the SUN RGB-D data also reverse the sign,
    # and after the horizontal flip the sign reverses again.
rotation_angle = info['annos']['rotation_y']
expected_gt_bboxes_3d = torch.tensor(
[[-1.2136, 4.0206, -0.2412, 2.2493, 1.8444, 1.9245, 1.3989],
[-2.7420, 4.5777, -0.7686, 0.5718, 0.8629, 0.9510, 1.4446],
[0.9729, 1.9087, -0.1443, 0.6965, 1.5273, 2.0563, 2.9924]])
[[
-1.2136, 4.0206, -0.2412, 2.2493, 1.8444, 1.9245,
1.3989 + 0.047001579467984445 * 2 - 2 * rotation_angle[0]
],
[
-2.7420, 4.5777, -0.7686, 0.5718, 0.8629, 0.9510,
1.4446 + 0.047001579467984445 * 2 - 2 * rotation_angle[1]
],
[
0.9729, 1.9087, -0.1443, 0.6965, 1.5273, 2.0563,
2.9924 + 0.047001579467984445 * 2 - 2 * rotation_angle[2]
]]).float()
expected_gt_labels_3d = np.array([0, 7, 6])
assert torch.allclose(gt_bboxes_3d.tensor, expected_gt_bboxes_3d, 1e-3)
assert np.allclose(gt_labels_3d.flatten(), expected_gt_labels_3d)
......
......@@ -38,63 +38,64 @@ def test_outdoor_aug_pipeline():
]
pipeline = Compose(train_pipeline)
# coord sys refactor: reverse sign of yaw
gt_bboxes_3d = LiDARInstance3DBoxes(
torch.tensor([
[
2.16902428e+01, -4.06038128e-02, -1.61906636e+00,
1.65999997e+00, 3.20000005e+00, 1.61000001e+00, -1.53999996e+00
1.65999997e+00, 3.20000005e+00, 1.61000001e+00, 1.53999996e+00
],
[
7.05006886e+00, -6.57459593e+00, -1.60107934e+00,
2.27999997e+00, 1.27799997e+01, 3.66000009e+00, 1.54999995e+00
2.27999997e+00, 1.27799997e+01, 3.66000009e+00, -1.54999995e+00
],
[
2.24698811e+01, -6.69203758e+00, -1.50118136e+00,
2.31999993e+00, 1.47299995e+01, 3.64000010e+00, 1.59000003e+00
2.31999993e+00, 1.47299995e+01, 3.64000010e+00, -1.59000003e+00
],
[
3.48291969e+01, -7.09058380e+00, -1.36622977e+00,
2.31999993e+00, 1.00400000e+01, 3.60999990e+00, 1.61000001e+00
2.31999993e+00, 1.00400000e+01, 3.60999990e+00, -1.61000001e+00
],
[
4.62394600e+01, -7.75838804e+00, -1.32405007e+00,
2.33999991e+00, 1.28299999e+01, 3.63000011e+00, 1.63999999e+00
2.33999991e+00, 1.28299999e+01, 3.63000011e+00, -1.63999999e+00
],
[
2.82966995e+01, -5.55755794e-01, -1.30332506e+00,
1.47000003e+00, 2.23000002e+00, 1.48000002e+00, -1.57000005e+00
1.47000003e+00, 2.23000002e+00, 1.48000002e+00, 1.57000005e+00
],
[
2.66690197e+01, 2.18230209e+01, -1.73605704e+00,
1.55999994e+00, 3.48000002e+00, 1.39999998e+00, -1.69000006e+00
1.55999994e+00, 3.48000002e+00, 1.39999998e+00, 1.69000006e+00
],
[
3.13197803e+01, 8.16214371e+00, -1.62177873e+00,
1.74000001e+00, 3.76999998e+00, 1.48000002e+00, 2.78999996e+00
1.74000001e+00, 3.76999998e+00, 1.48000002e+00, -2.78999996e+00
],
[
4.34395561e+01, -1.95209332e+01, -1.20757008e+00,
1.69000006e+00, 4.09999990e+00, 1.40999997e+00, -1.53999996e+00
1.69000006e+00, 4.09999990e+00, 1.40999997e+00, 1.53999996e+00
],
[
3.29882965e+01, -3.79360509e+00, -1.69245458e+00,
1.74000001e+00, 4.09000015e+00, 1.49000001e+00, -1.52999997e+00
1.74000001e+00, 4.09000015e+00, 1.49000001e+00, 1.52999997e+00
],
[
3.85469360e+01, 8.35060215e+00, -1.31423414e+00,
1.59000003e+00, 4.28000021e+00, 1.45000005e+00, 1.73000002e+00
1.59000003e+00, 4.28000021e+00, 1.45000005e+00, -1.73000002e+00
],
[
2.22492104e+01, -1.13536005e+01, -1.38272512e+00,
1.62000000e+00, 3.55999994e+00, 1.71000004e+00, 2.48000002e+00
1.62000000e+00, 3.55999994e+00, 1.71000004e+00, -2.48000002e+00
],
[
3.36115799e+01, -1.97708054e+01, -4.92827654e-01,
1.64999998e+00, 3.54999995e+00, 1.79999995e+00, -1.57000005e+00
1.64999998e+00, 3.54999995e+00, 1.79999995e+00, 1.57000005e+00
],
[
9.85029602e+00, -1.51294518e+00, -1.66834795e+00,
1.59000003e+00, 3.17000008e+00, 1.38999999e+00, -8.39999974e-01
1.59000003e+00, 3.17000008e+00, 1.38999999e+00, 8.39999974e-01
]
],
dtype=torch.float32))
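# Illustrative sketch (not from the patch): for the ground-truth boxes above,
# only the yaw convention changed in the refactor, so each updated heading is
# simply the negation of the previous value.
import torch
old_yaws = torch.tensor([-1.54, 1.55, 1.59, 1.61, 1.64])
new_yaws = -old_yaws  # matches the first five refactored yaw values above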
......@@ -105,23 +106,59 @@ def test_outdoor_aug_pipeline():
bbox3d_fields=[],
img_fields=[])
origin_center = gt_bboxes_3d.tensor[:, :3].clone()
origin_angle = gt_bboxes_3d.tensor[:, 6].clone()
output = pipeline(results)
# manually go through the pipeline
rotation_angle = output['img_metas']._data['pcd_rotation_angle']
rotation_matrix = output['img_metas']._data['pcd_rotation']
noise_angle = torch.tensor([
0.70853819, -0.19160091, -0.71116999, 0.49571753, -0.12447527,
-0.4690133, -0.34776965, -0.65692282, -0.52442831, -0.01575567,
-0.61849673, 0.6572608, 0.30312288, -0.19182971
])
noise_trans = torch.tensor([[1.7641e+00, 4.0016e-01, 4.8937e-01],
[-1.3065e+00, 1.6581e+00, -5.9082e-02],
[-1.5504e+00, 4.1732e-01, -4.7218e-01],
[-5.2158e-01, -1.1847e+00, 4.8035e-01],
[-8.9637e-01, -1.9627e+00, 7.9241e-01],
[1.3240e-02, -1.2194e-01, 1.6953e-01],
[8.1798e-01, -2.7891e-01, 7.1578e-01],
[-4.1733e-04, 3.7416e-01, 2.0478e-01],
[1.5218e-01, -3.7413e-01, -6.7257e-03],
[-1.9138e+00, -2.2855e+00, -8.0092e-01],
[1.5933e+00, 5.6872e-01, -5.7244e-02],
[-1.8523e+00, -7.1333e-01, -8.8111e-01],
[5.2678e-01, 1.0106e-01, -1.9432e-01],
[-7.2449e-01, -8.0292e-01, -1.1334e-02]])
angle = -origin_angle - noise_angle + torch.tensor(rotation_angle)
angle -= 2 * np.pi * (angle >= np.pi)
angle += 2 * np.pi * (angle < -np.pi)
scale = output['img_metas']._data['pcd_scale_factor']
expected_tensor = torch.tensor(
[[20.6514, -8.8250, -1.0816, 1.5893, 3.0637, 1.5414, -1.9216],
[7.9374, 4.9457, -1.2008, 2.1829, 12.2357, 3.5041, 1.6629],
[20.8115, -2.0273, -1.8893, 2.2212, 14.1026, 3.4850, 2.6513],
[32.3850, -5.2135, -1.1321, 2.2212, 9.6124, 3.4562, 2.6498],
[43.7022, -7.8316, -0.5090, 2.2403, 12.2836, 3.4754, 2.0146],
[25.3300, -9.6670, -1.0855, 1.4074, 2.1350, 1.4170, -0.7141],
[16.5414, -29.0583, -0.9768, 1.4936, 3.3318, 1.3404, -0.7153],
[24.6548, -18.9226, -1.3567, 1.6659, 3.6094, 1.4170, 1.3970],
[45.8403, 1.8183, -1.1626, 1.6180, 3.9254, 1.3499, -0.6886],
[30.6288, -8.4497, -1.4881, 1.6659, 3.9158, 1.4265, -0.7241],
[32.3316, -22.4611, -1.3131, 1.5223, 4.0977, 1.3882, 2.4186],
[22.4492, 3.2944, -2.1674, 1.5510, 3.4084, 1.6372, 0.3928],
[37.3824, 5.0472, -0.6579, 1.5797, 3.3988, 1.7233, -1.4862],
[8.9259, -1.2578, -1.6081, 1.5223, 3.0350, 1.3308, -1.7212]])
[[20.6514, -8.8250, -1.0816, 1.5893, 3.0637, 1.5414],
[7.9374, 4.9457, -1.2008, 2.1829, 12.2357, 3.5041],
[20.8115, -2.0273, -1.8893, 2.2212, 14.1026, 3.4850],
[32.3850, -5.2135, -1.1321, 2.2212, 9.6124, 3.4562],
[43.7022, -7.8316, -0.5090, 2.2403, 12.2836, 3.4754],
[25.3300, -9.6670, -1.0855, 1.4074, 2.1350, 1.4170],
[16.5414, -29.0583, -0.9768, 1.4936, 3.3318, 1.3404],
[24.6548, -18.9226, -1.3567, 1.6659, 3.6094, 1.4170],
[45.8403, 1.8183, -1.1626, 1.6180, 3.9254, 1.3499],
[30.6288, -8.4497, -1.4881, 1.6659, 3.9158, 1.4265],
[32.3316, -22.4611, -1.3131, 1.5223, 4.0977, 1.3882],
[22.4492, 3.2944, -2.1674, 1.5510, 3.4084, 1.6372],
[37.3824, 5.0472, -0.6579, 1.5797, 3.3988, 1.7233],
[8.9259, -1.2578, -1.6081, 1.5223, 3.0350, 1.3308]])
expected_tensor[:, :3] = ((
(origin_center + noise_trans) * torch.tensor([1, -1, 1]))
@ rotation_matrix) * scale
expected_tensor = torch.cat([expected_tensor, angle.unsqueeze(-1)], dim=-1)
assert torch.allclose(
output['gt_bboxes_3d']._data.tensor, expected_tensor, atol=1e-3)
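# Illustrative sketch (not from the patch): the two wrap-around lines above
# normalize the expected yaw into [-pi, pi). Assuming the angles never drift
# by more than one full period, an equivalent modulo-based helper is:
import numpy as np
import torch

def wrap_to_pi(angle: torch.Tensor) -> torch.Tensor:
    """Map angles to the half-open interval [-pi, pi)."""
    return (angle + np.pi) % (2 * np.pi) - np.pi

# e.g. wrap_to_pi(torch.tensor([3.5, -3.5])) -> tensor([-2.7832, 2.7832])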
......@@ -208,6 +245,11 @@ def test_outdoor_velocity_aug_pipeline():
bbox3d_fields=[],
img_fields=[])
origin_center = gt_bboxes_3d.tensor[:, :3].clone()
origin_angle = gt_bboxes_3d.tensor[:, 6].clone(
) # TODO: ObjectNoise modifies tensor!!
origin_velo = gt_bboxes_3d.tensor[:, 7:9].clone()
output = pipeline(results)
expected_tensor = torch.tensor(
......@@ -247,5 +289,21 @@ def test_outdoor_velocity_aug_pipeline():
-4.4522e+00, -2.9166e+01, -7.8938e-01, 2.2841e+00, 3.8348e+00,
1.5925e+00, 1.4721e+00, -7.8371e-03, -8.1931e-03
]])
# coord sys refactor (manually go through pipeline)
rotation_angle = output['img_metas']._data['pcd_rotation_angle']
rotation_matrix = output['img_metas']._data['pcd_rotation']
expected_tensor[:, :3] = ((origin_center @ rotation_matrix) *
output['img_metas']._data['pcd_scale_factor'] *
torch.tensor([1, -1, 1]))[[
0, 1, 2, 3, 4, 5, 6, 7, 9
]]
angle = -origin_angle - rotation_angle
angle -= 2 * np.pi * (angle >= np.pi)
angle += 2 * np.pi * (angle < -np.pi)
expected_tensor[:, 6:7] = angle.unsqueeze(-1)[[0, 1, 2, 3, 4, 5, 6, 7, 9]]
expected_tensor[:,
7:9] = ((origin_velo @ rotation_matrix[:2, :2]) *
output['img_metas']._data['pcd_scale_factor'] *
torch.tensor([1, -1]))[[0, 1, 2, 3, 4, 5, 6, 7, 9]]
assert torch.allclose(
output['gt_bboxes_3d']._data.tensor, expected_tensor, atol=1e-3)
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
......@@ -16,8 +17,8 @@ def test_RoIAwarePool3d():
roiaware_pool3d_avg = RoIAwarePool3d(
out_size=4, max_pts_per_voxel=128, mode='avg')
rois = torch.tensor(
[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 0.3],
[-10.0, 23.0, 16.0, 10, 20, 20, 0.5]],
[[1.0, 2.0, 3.0, 5.0, 4.0, 6.0, -0.3 - np.pi / 2],
[-10.0, 23.0, 16.0, 20.0, 10.0, 20.0, -0.5 - np.pi / 2]],
dtype=torch.float32).cuda(
) # boxes (m, 7) with bottom center in lidar coordinate
pts = torch.tensor(
......@@ -64,6 +65,17 @@ def test_points_in_boxes_gpu():
assert point_indices.shape == torch.Size([2, 8])
assert (point_indices == expected_point_indices).all()
boxes = torch.tensor([[[0.0, 0.0, 0.0, 1.0, 20.0, 1.0, 0.523598]]],
dtype=torch.float32).cuda() # 30 degrees
pts = torch.tensor(
[[[4, 6.928, 0], [6.928, 4, 0], [4, -6.928, 0], [6.928, -4, 0],
[-4, 6.928, 0], [-6.928, 4, 0], [-4, -6.928, 0], [-6.928, -4, 0]]],
dtype=torch.float32).cuda()
point_indices = points_in_boxes_gpu(points=pts, boxes=boxes)
expected_point_indices = torch.tensor([[-1, -1, 0, -1, 0, -1, -1, -1]],
dtype=torch.int32).cuda()
assert (point_indices == expected_point_indices).all()
if torch.cuda.device_count() > 1:
pts = pts.to('cuda:1')
boxes = boxes.to('cuda:1')
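# Illustrative sketch (assumptions flagged below, not from the patch): a
# plain-Python reference for the rotated-box membership test above, assuming
# boxes are (x, y, z, dx, dy, dz, yaw) with a bottom-center origin and yaw
# measured counter-clockwise around +z.
import numpy as np

def point_in_box(pt, box):
    x, y, z = pt
    cx, cy, cz, dx, dy, dz, yaw = box
    # rotate the point by -yaw into the box-aligned frame
    local_x = (x - cx) * np.cos(yaw) + (y - cy) * np.sin(yaw)
    local_y = -(x - cx) * np.sin(yaw) + (y - cy) * np.cos(yaw)
    return (abs(local_x) <= dx / 2 and abs(local_y) <= dy / 2
            and 0 <= z - cz <= dz)

# point_in_box([4, -6.928, 0], [0, 0, 0, 1, 20, 1, 0.523598]) -> True
# point_in_box([4, 6.928, 0], [0, 0, 0, 1, 20, 1, 0.523598]) -> False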
......@@ -75,23 +87,35 @@ def test_points_in_boxes_gpu():
def test_points_in_boxes_cpu():
boxes = torch.tensor(
[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 0.3],
[-10.0, 23.0, 16.0, 10, 20, 20, 0.5]],
[[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 0.3],
[-10.0, 23.0, 16.0, 10, 20, 20, 0.5]]],
dtype=torch.float32
) # boxes (m, 7) with bottom center in lidar coordinate
pts = torch.tensor(
[[1, 2, 3.3], [1.2, 2.5, 3.0], [0.8, 2.1, 3.5], [1.6, 2.6, 3.6],
[0.8, 1.2, 3.9], [-9.2, 21.0, 18.2], [3.8, 7.9, 6.3],
[4.7, 3.5, -12.2], [3.8, 7.6, -2], [-10.6, -12.9, -20], [-16, -18, 9],
[-21.3, -52, -5], [0, 0, 0], [6, 7, 8], [-2, -3, -4]],
[[[1, 2, 3.3], [1.2, 2.5, 3.0], [0.8, 2.1, 3.5], [1.6, 2.6, 3.6],
[0.8, 1.2, 3.9], [-9.2, 21.0, 18.2], [3.8, 7.9, 6.3],
[4.7, 3.5, -12.2], [3.8, 7.6, -2], [-10.6, -12.9, -20], [
-16, -18, 9
], [-21.3, -52, -5], [0, 0, 0], [6, 7, 8], [-2, -3, -4]]],
dtype=torch.float32) # points (n, 3) in lidar coordinate
point_indices = points_in_boxes_cpu(points=pts, boxes=boxes)
expected_point_indices = torch.tensor(
[[1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
[[[1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [0, 1], [0, 0], [0, 0],
[0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]],
dtype=torch.int32)
assert point_indices.shape == torch.Size([2, 15])
assert point_indices.shape == torch.Size([1, 15, 2])
assert (point_indices == expected_point_indices).all()
boxes = torch.tensor([[[0.0, 0.0, 0.0, 1.0, 20.0, 1.0, 0.523598]]],
dtype=torch.float32) # 30 degrees
pts = torch.tensor(
[[[4, 6.928, 0], [6.928, 4, 0], [4, -6.928, 0], [6.928, -4, 0],
[-4, 6.928, 0], [-6.928, 4, 0], [-4, -6.928, 0], [-6.928, -4, 0]]],
dtype=torch.float32)
point_indices = points_in_boxes_cpu(points=pts, boxes=boxes)
expected_point_indices = torch.tensor(
[[[0], [0], [1], [0], [1], [0], [0], [0]]], dtype=torch.int32)
assert (point_indices == expected_point_indices).all()
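# Hedged summary of the shape contract exercised above (not from the patch):
# points_in_boxes_cpu takes batched inputs and returns a per-box indicator,
# while points_in_boxes_gpu returns the index of a containing box or -1.
#   points_in_boxes_cpu(points=(B, N, 3), boxes=(B, M, 7)) -> (B, N, M) of 0/1
#   points_in_boxes_gpu(points=(B, N, 3), boxes=(B, M, 7)) -> (B, N) box index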
......
......@@ -76,7 +76,7 @@ def test_loss():
2.0579e-02, 1.5005e-04, 3.5252e-05, 0.0000e+00, 2.0433e-05, 1.5422e-05
])
expected_loss_bbox = torch.as_tensor(0.0622)
expected_loss_corner = torch.Tensor([0.1379])
expected_loss_corner = torch.Tensor([0.1374])
assert torch.allclose(loss['loss_cls'], expected_loss_cls, 1e-3)
assert torch.allclose(loss['loss_bbox'], expected_loss_bbox, 1e-3)
......@@ -201,7 +201,7 @@ def test_get_targets():
])
expected_bbox_targets = torch.Tensor(
[[0.0805, 0.0130, 0.0047, 0.0542, -0.2252, 0.0299, -0.1495]])
[[-0.0632, 0.0516, 0.0047, 0.0542, -0.2252, 0.0299, -0.1495]])
expected_pos_gt_bboxes = torch.Tensor(
[[7.8417, -0.1405, -1.9652, 1.6122, 3.2838, 1.5331, -2.0835]])
......@@ -345,12 +345,11 @@ def test_get_bboxes():
selected_bboxes, selected_scores, selected_label_preds = result_list[0]
expected_selected_bboxes = torch.Tensor(
[[56.2170, 25.9074, -1.3610, 1.6025, 3.6730, 1.5128, -0.1179],
[54.6521, 28.8846, -1.9145, 1.6362, 4.0573, 1.5599, -1.7335],
[31.6179, -5.6004, -1.2470, 1.6458, 4.1622, 1.5632, -1.5734]]).cuda()
[[56.0888, 25.6445, -1.3610, 1.6025, 3.6730, 1.5128, -0.1179],
[54.4606, 29.2412, -1.9145, 1.6362, 4.0573, 1.5599, -1.7335],
[31.8887, -5.8574, -1.2470, 1.6458, 4.1622, 1.5632, -1.5734]]).cuda()
expected_selected_scores = torch.Tensor([-2.2061, -2.1121, -0.1761]).cuda()
expected_selected_label_preds = torch.Tensor([2., 2., 2.]).cuda()
assert torch.allclose(selected_bboxes.tensor, expected_selected_bboxes,
1e-3)
assert torch.allclose(selected_scores, expected_selected_scores, 1e-3)
......@@ -387,43 +386,43 @@ def test_multi_class_nms():
box_preds = torch.Tensor(
[[
5.6217e+01, 2.5908e+01, -1.3611e+00, 1.6025e+00, 3.6730e+00,
1.5129e+00, -1.1786e-01
1.5129e+00, 1.1786e-01
],
[
5.4653e+01, 2.8885e+01, -1.9145e+00, 1.6362e+00, 4.0574e+00,
1.5599e+00, -1.7335e+00
1.5599e+00, 1.7335e+00
],
[
5.5809e+01, 2.5686e+01, -1.4457e+00, 1.5939e+00, 3.8270e+00,
1.4997e+00, -2.9191e+00
1.4997e+00, 2.9191e+00
],
[
5.6107e+01, 2.6082e+01, -1.3557e+00, 1.5782e+00, 3.7444e+00,
1.5266e+00, 1.7707e-01
1.5266e+00, -1.7707e-01
],
[
3.1618e+01, -5.6004e+00, -1.2470e+00, 1.6459e+00, 4.1622e+00,
1.5632e+00, -1.5734e+00
1.5632e+00, 1.5734e+00
],
[
3.1605e+01, -5.6342e+00, -1.2467e+00, 1.6474e+00, 4.1519e+00,
1.5481e+00, -1.6313e+00
1.5481e+00, 1.6313e+00
],
[
5.6211e+01, 2.7294e+01, -1.5350e+00, 1.5422e+00, 3.7733e+00,
1.5140e+00, 9.5846e-02
1.5140e+00, -9.5846e-02
],
[
5.5907e+01, 2.7155e+01, -1.4712e+00, 1.5416e+00, 3.7611e+00,
1.5142e+00, -5.2059e-02
1.5142e+00, 5.2059e-02
],
[
5.4000e+01, 3.0585e+01, -1.6874e+00, 1.6495e+00, 4.0376e+00,
1.5554e+00, -1.7900e+00
1.5554e+00, 1.7900e+00
],
[
5.6007e+01, 2.6300e+01, -1.3945e+00, 1.5716e+00, 3.7064e+00,
1.4715e+00, -2.9639e+00
1.4715e+00, 2.9639e+00
]]).cuda()
input_meta = dict(
......
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
......@@ -21,8 +22,8 @@ def test_single_roiaware_extractor():
dtype=torch.float32).cuda()
coordinate = feats.clone()
batch_inds = torch.zeros(feats.shape[0]).cuda()
rois = torch.tensor([[0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 0.3],
[0, -10.0, 23.0, 16.0, 10, 20, 20, 0.5]],
rois = torch.tensor([[0, 1.0, 2.0, 3.0, 5.0, 4.0, 6.0, -0.3 - np.pi / 2],
[0, -10.0, 23.0, 16.0, 20, 10, 20, -0.5 - np.pi / 2]],
dtype=torch.float32).cuda()
# test forward
pooled_feats = self(feats, coordinate, batch_inds, rois)
......
......@@ -53,11 +53,11 @@ def test_PointwiseSemanticHead():
gt_bboxes = [
LiDARInstance3DBoxes(
torch.tensor(
[[6.4118, -3.4305, -1.7291, 1.7033, 3.4693, 1.6197, -0.9091]],
[[6.4118, -3.4305, -1.7291, 1.7033, 3.4693, 1.6197, 0.9091]],
dtype=torch.float32).cuda()),
LiDARInstance3DBoxes(
torch.tensor(
[[16.9107, 9.7925, -1.9201, 1.6097, 3.2786, 1.5307, -2.4056]],
[[16.9107, 9.7925, -1.9201, 1.6097, 3.2786, 1.5307, 2.4056]],
dtype=torch.float32).cuda())
]
# batch size is 2 in the unit test
......
......@@ -22,7 +22,7 @@ def test_anchor_3d_range_generator():
[0, -39.68, -0.6, 70.4, 39.68, -0.6],
[0, -39.68, -1.78, 70.4, 39.68, -1.78],
],
sizes=[[0.6, 0.8, 1.73], [0.6, 1.76, 1.73], [1.6, 3.9, 1.56]],
sizes=[[0.8, 0.6, 1.73], [1.76, 0.6, 1.73], [3.9, 1.6, 1.56]],
rotations=[0, 1.57],
reshape_out=False)
......@@ -32,8 +32,8 @@ def test_anchor_3d_range_generator():
'[[0, -39.68, -0.6, 70.4, 39.68, -0.6], ' \
'[0, -39.68, -0.6, 70.4, 39.68, -0.6], ' \
'[0, -39.68, -1.78, 70.4, 39.68, -1.78]],' \
'\nscales=[1],\nsizes=[[0.6, 0.8, 1.73], ' \
'[0.6, 1.76, 1.73], [1.6, 3.9, 1.56]],' \
'\nscales=[1],\nsizes=[[0.8, 0.6, 1.73], ' \
'[1.76, 0.6, 1.73], [3.9, 1.6, 1.56]],' \
'\nrotations=[0, 1.57],\nreshape_out=False,' \
'\nsize_per_range=True)'
assert repr_str == expected_repr_str
......@@ -54,8 +54,8 @@ def test_aligned_anchor_generator():
ranges=[[-51.2, -51.2, -1.80, 51.2, 51.2, -1.80]],
scales=[1, 2, 4],
sizes=[
[0.8660, 2.5981, 1.], # 1.5/sqrt(3)
[0.5774, 1.7321, 1.], # 1/sqrt(3)
[2.5981, 0.8660, 1.], # 1.5/sqrt(3)
[1.7321, 0.5774, 1.], # 1/sqrt(3)
[1., 1., 1.],
[0.4, 0.4, 1],
],
......@@ -71,7 +71,7 @@ def test_aligned_anchor_generator():
# check base anchors
expected_grid_anchors = [
torch.tensor([[
-51.0000, -51.0000, -1.8000, 0.8660, 2.5981, 1.0000, 0.0000,
-51.0000, -51.0000, -1.8000, 2.5981, 0.8660, 1.0000, 0.0000,
0.0000, 0.0000
],
[
......@@ -91,20 +91,20 @@ def test_aligned_anchor_generator():
0.0000, 0.0000, 0.0000
],
[
-49.4000, -51.0000, -1.8000, 0.5774, 1.7321, 1.0000,
-49.4000, -51.0000, -1.8000, 1.7321, 0.5774, 1.0000,
1.5700, 0.0000, 0.0000
],
[
-49.0000, -51.0000, -1.8000, 0.5774, 1.7321, 1.0000,
-49.0000, -51.0000, -1.8000, 1.7321, 0.5774, 1.0000,
0.0000, 0.0000, 0.0000
],
[
-48.6000, -51.0000, -1.8000, 0.8660, 2.5981, 1.0000,
-48.6000, -51.0000, -1.8000, 2.5981, 0.8660, 1.0000,
1.5700, 0.0000, 0.0000
]],
device=device),
torch.tensor([[
-50.8000, -50.8000, -1.8000, 1.7320, 5.1962, 2.0000, 0.0000,
-50.8000, -50.8000, -1.8000, 5.1962, 1.7320, 2.0000, 0.0000,
0.0000, 0.0000
],
[
......@@ -124,20 +124,20 @@ def test_aligned_anchor_generator():
0.0000, 0.0000, 0.0000
],
[
-47.6000, -50.8000, -1.8000, 1.1548, 3.4642, 2.0000,
-47.6000, -50.8000, -1.8000, 3.4642, 1.1548, 2.0000,
1.5700, 0.0000, 0.0000
],
[
-46.8000, -50.8000, -1.8000, 1.1548, 3.4642, 2.0000,
-46.8000, -50.8000, -1.8000, 3.4642, 1.1548, 2.0000,
0.0000, 0.0000, 0.0000
],
[
-46.0000, -50.8000, -1.8000, 1.7320, 5.1962, 2.0000,
-46.0000, -50.8000, -1.8000, 5.1962, 1.7320, 2.0000,
1.5700, 0.0000, 0.0000
]],
device=device),
torch.tensor([[
-50.4000, -50.4000, -1.8000, 3.4640, 10.3924, 4.0000, 0.0000,
-50.4000, -50.4000, -1.8000, 10.3924, 3.4640, 4.0000, 0.0000,
0.0000, 0.0000
],
[
......@@ -157,15 +157,15 @@ def test_aligned_anchor_generator():
0.0000, 0.0000, 0.0000
],
[
-44.0000, -50.4000, -1.8000, 2.3096, 6.9284, 4.0000,
-44.0000, -50.4000, -1.8000, 6.9284, 2.3096, 4.0000,
1.5700, 0.0000, 0.0000
],
[
-42.4000, -50.4000, -1.8000, 2.3096, 6.9284, 4.0000,
-42.4000, -50.4000, -1.8000, 6.9284, 2.3096, 4.0000,
0.0000, 0.0000, 0.0000
],
[
-40.8000, -50.4000, -1.8000, 3.4640, 10.3924, 4.0000,
-40.8000, -50.4000, -1.8000, 10.3924, 3.4640, 4.0000,
1.5700, 0.0000, 0.0000
]],
device=device)
......@@ -194,7 +194,7 @@ def test_aligned_anchor_generator_per_cls():
type='AlignedAnchor3DRangeGeneratorPerCls',
ranges=[[-100, -100, -1.80, 100, 100, -1.80],
[-100, -100, -1.30, 100, 100, -1.30]],
sizes=[[0.63, 1.76, 1.44], [0.96, 2.35, 1.59]],
sizes=[[1.76, 0.63, 1.44], [2.35, 0.96, 1.59]],
custom_values=[0, 0],
rotations=[0, 1.57],
reshape_out=False)
......@@ -205,20 +205,20 @@ def test_aligned_anchor_generator_per_cls():
# check base anchors
expected_grid_anchors = [[
torch.tensor([[
-99.0000, -99.0000, -1.8000, 0.6300, 1.7600, 1.4400, 0.0000,
-99.0000, -99.0000, -1.8000, 1.7600, 0.6300, 1.4400, 0.0000,
0.0000, 0.0000
],
[
-99.0000, -99.0000, -1.8000, 0.6300, 1.7600, 1.4400,
-99.0000, -99.0000, -1.8000, 1.7600, 0.6300, 1.4400,
1.5700, 0.0000, 0.0000
]],
device=device),
torch.tensor([[
-98.0000, -98.0000, -1.3000, 0.9600, 2.3500, 1.5900, 0.0000,
-98.0000, -98.0000, -1.3000, 2.3500, 0.9600, 1.5900, 0.0000,
0.0000, 0.0000
],
[
-98.0000, -98.0000, -1.3000, 0.9600, 2.3500, 1.5900,
-98.0000, -98.0000, -1.3000, 2.3500, 0.9600, 1.5900,
1.5700, 0.0000, 0.0000
]],
device=device)
......
......@@ -20,7 +20,7 @@ def test_camera_to_lidar():
def test_box_camera_to_lidar():
from mmdet3d.core.bbox.box_np_ops import box_camera_to_lidar
box = np.array([[1.84, 1.47, 8.41, 1.2, 1.89, 0.48, 0.01]])
box = np.array([[1.84, 1.47, 8.41, 1.2, 1.89, 0.48, -0.01]])
rect = np.array([[0.9999128, 0.01009263, -0.00851193, 0.],
[-0.01012729, 0.9999406, -0.00403767, 0.],
[0.00847068, 0.00412352, 0.9999556, 0.], [0., 0., 0.,
......@@ -30,8 +30,9 @@ def test_box_camera_to_lidar():
[0.9999753, 0.00693114, -0.0011439, -0.3321029],
[0., 0., 0., 1.]])
box_lidar = box_camera_to_lidar(box, rect, Trv2c)
expected_box = np.array(
[[8.73138192, -1.85591746, -1.59969933, 0.48, 1.2, 1.89, 0.01]])
expected_box = np.array([[
8.73138192, -1.85591746, -1.59969933, 1.2, 0.48, 1.89, 0.01 - np.pi / 2
]])
assert np.allclose(box_lidar, expected_box)
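# Illustrative sketch (hedged, not from the patch): the expected box follows
# from (a) mapping the camera-frame center back to the lidar frame with the
# inverse rectification/extrinsic transform and (b) remapping dims and yaw to
# the refactored lidar convention, i.e. (dx, dy, dz) = (l, w, h) and
# yaw_lidar = -ry - pi / 2. Assumes a single 7-element camera box with dims in
# KITTI (l, h, w) order, and `rect`/`Trv2c` as the 4x4 matrices above.
import numpy as np

def box_cam_to_lidar_sketch(box, rect, Trv2c):
    xyz_cam, l, h, w, ry = box[:3], box[3], box[4], box[5], box[6]
    xyz_hom = np.append(xyz_cam, 1.0)
    xyz_lidar = (np.linalg.inv(rect @ Trv2c) @ xyz_hom)[:3]
    return np.concatenate([xyz_lidar, [l, w, h, -ry - np.pi / 2]])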
......@@ -48,22 +49,17 @@ def test_center_to_corner_box2d():
from mmdet3d.core.bbox.box_np_ops import center_to_corner_box2d
center = np.array([[9.348705, -3.6271024]])
dims = np.array([[0.47, 0.98]])
angles = np.array([-3.14])
angles = np.array([3.14])
corner = center_to_corner_box2d(center, dims, angles)
expected_corner = np.array([[[9.584485, -3.1374772], [9.582925, -4.117476],
[9.112926, -4.1167274],
[9.114486, -3.1367288]]])
assert np.allclose(corner, expected_corner)
def test_rotation_2d():
from mmdet3d.core.bbox.box_np_ops import rotation_2d
angles = np.array([-3.14])
corners = np.array([[[-0.235, -0.49], [-0.235, 0.49], [0.235, 0.49],
[0.235, -0.49]]])
corners_rotated = rotation_2d(corners, angles)
expected_corners = np.array([[[0.2357801, 0.48962511],
[0.2342193, -0.49037365],
[-0.2357801, -0.48962511],
[-0.2342193, 0.49037365]]])
assert np.allclose(corners_rotated, expected_corners)
center = np.array([[-0.0, 0.0]])
dims = np.array([[4.0, 8.0]])
angles = np.array([-0.785398]) # -45 degrees
corner = center_to_corner_box2d(center, dims, angles)
expected_corner = np.array([[[-4.24264, -1.41421], [1.41421, 4.24264],
[4.24264, 1.41421], [-1.41421, -4.24264]]])
assert np.allclose(corner, expected_corner)
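# Illustrative sketch (not from the patch): the expected corners above can be
# reproduced by scaling unit corners by the box dims, rotating them
# counter-clockwise by the angle and translating by the center.
import numpy as np

def corners_2d_sketch(center, dims, angle):
    dx, dy = dims
    # corner order: (-x, -y), (-x, +y), (+x, +y), (+x, -y)
    corners = np.array([[-dx, -dy], [-dx, dy], [dx, dy], [dx, -dy]]) / 2.0
    rot = np.array([[np.cos(angle), -np.sin(angle)],
                    [np.sin(angle), np.cos(angle)]])
    return corners @ rot.T + np.asarray(center)

# corners_2d_sketch([0., 0.], [4., 8.], -0.785398) reproduces expected_corner
# in the test above (up to rounding).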
......@@ -3,7 +3,8 @@ import numpy as np
import torch
from mmdet3d.core.bbox import (CameraInstance3DBoxes, Coord3DMode,
DepthInstance3DBoxes, LiDARInstance3DBoxes)
DepthInstance3DBoxes, LiDARInstance3DBoxes,
limit_period)
from mmdet3d.core.points import CameraPoints, DepthPoints, LiDARPoints
......@@ -242,22 +243,31 @@ def test_boxes_conversion():
convert_lidar_boxes = Coord3DMode.convert(cam_boxes, Coord3DMode.CAM,
Coord3DMode.LIDAR)
expected_tensor = torch.tensor(
[[-1.7501, -1.7802, -2.5162, 1.6500, 1.7500, 3.3900, 1.4800],
[-1.6357, -8.9594, -2.4567, 1.5700, 1.5400, 4.0100, 1.6200],
[-1.3033, -28.2967, 0.5558, 1.4800, 1.4700, 2.2300, -1.5700],
[-1.7361, -26.6690, -21.8230, 1.4000, 1.5600, 3.4800, -1.6900],
[-1.6218, -31.3198, -8.1621, 1.4800, 1.7400, 3.7700, 2.7900]])
expected_tensor = torch.tensor([[
-1.7501, -1.7802, -2.5162, 1.7500, 1.6500, 3.3900, -1.4800 - np.pi / 2
], [
-1.6357, -8.9594, -2.4567, 1.5400, 1.5700, 4.0100, -1.6200 - np.pi / 2
], [-1.3033, -28.2967, 0.5558, 1.4700, 1.4800, 2.2300, 1.5700 - np.pi / 2],
[
-1.7361, -26.6690, -21.8230, 1.5600,
1.4000, 3.4800, 1.6900 - np.pi / 2
],
[
-1.6218, -31.3198, -8.1621, 1.7400,
1.4800, 3.7700, -2.7900 - np.pi / 2
]])
expected_tensor[:, -1:] = limit_period(
expected_tensor[:, -1:], period=np.pi * 2)
assert torch.allclose(expected_tensor, convert_lidar_boxes.tensor, 1e-3)
convert_depth_boxes = Coord3DMode.convert(cam_boxes, Coord3DMode.CAM,
Coord3DMode.DEPTH)
expected_tensor = torch.tensor(
[[1.7802, 1.7501, 2.5162, 1.7500, 1.6500, 3.3900, 1.4800],
[8.9594, 1.6357, 2.4567, 1.5400, 1.5700, 4.0100, 1.6200],
[28.2967, 1.3033, -0.5558, 1.4700, 1.4800, 2.2300, -1.5700],
[26.6690, 1.7361, 21.8230, 1.5600, 1.4000, 3.4800, -1.6900],
[31.3198, 1.6218, 8.1621, 1.7400, 1.4800, 3.7700, 2.7900]])
[[1.7802, 1.7501, 2.5162, 1.7500, 1.6500, 3.3900, -1.4800],
[8.9594, 1.6357, 2.4567, 1.5400, 1.5700, 4.0100, -1.6200],
[28.2967, 1.3033, -0.5558, 1.4700, 1.4800, 2.2300, 1.5700],
[26.6690, 1.7361, 21.8230, 1.5600, 1.4000, 3.4800, 1.6900],
[31.3198, 1.6218, 8.1621, 1.7400, 1.4800, 3.7700, -2.7900]])
assert torch.allclose(expected_tensor, convert_depth_boxes.tensor, 1e-3)
# test LIDAR to CAM and DEPTH
......@@ -269,22 +279,42 @@ def test_boxes_conversion():
[31.31978, 8.162144, -1.6217787, 1.74, 3.77, 1.48, 2.79]])
convert_cam_boxes = Coord3DMode.convert(lidar_boxes, Coord3DMode.LIDAR,
Coord3DMode.CAM)
expected_tensor = torch.tensor(
[[-2.5162, 1.7501, 1.7802, 3.3900, 1.6500, 1.7500, 1.4800],
[-2.4567, 1.6357, 8.9594, 4.0100, 1.5700, 1.5400, 1.6200],
[0.5558, 1.3033, 28.2967, 2.2300, 1.4800, 1.4700, -1.5700],
[-21.8230, 1.7361, 26.6690, 3.4800, 1.4000, 1.5600, -1.6900],
[-8.1621, 1.6218, 31.3198, 3.7700, 1.4800, 1.7400, 2.7900]])
expected_tensor = torch.tensor([
[-2.5162, 1.7501, 1.7802, 1.7500, 1.6500, 3.3900, -1.4800 - np.pi / 2],
[-2.4567, 1.6357, 8.9594, 1.5400, 1.5700, 4.0100, -1.6200 - np.pi / 2],
[0.5558, 1.3033, 28.2967, 1.4700, 1.4800, 2.2300, 1.5700 - np.pi / 2],
[
-21.8230, 1.7361, 26.6690, 1.5600, 1.4000, 3.4800,
1.6900 - np.pi / 2
],
[
-8.1621, 1.6218, 31.3198, 1.7400, 1.4800, 3.7700,
-2.7900 - np.pi / 2
]
])
expected_tensor[:, -1:] = limit_period(
expected_tensor[:, -1:], period=np.pi * 2)
assert torch.allclose(expected_tensor, convert_cam_boxes.tensor, 1e-3)
convert_depth_boxes = Coord3DMode.convert(lidar_boxes, Coord3DMode.LIDAR,
Coord3DMode.DEPTH)
expected_tensor = torch.tensor(
[[-2.5162, 1.7802, -1.7501, 3.3900, 1.7500, 1.6500, 1.4800],
[-2.4567, 8.9594, -1.6357, 4.0100, 1.5400, 1.5700, 1.6200],
[0.5558, 28.2967, -1.3033, 2.2300, 1.4700, 1.4800, -1.5700],
[-21.8230, 26.6690, -1.7361, 3.4800, 1.5600, 1.4000, -1.6900],
[-8.1621, 31.3198, -1.6218, 3.7700, 1.7400, 1.4800, 2.7900]])
expected_tensor = torch.tensor([[
-2.5162, 1.7802, -1.7501, 1.7500, 3.3900, 1.6500, 1.4800 + np.pi / 2
], [-2.4567, 8.9594, -1.6357, 1.5400, 4.0100, 1.5700, 1.6200 + np.pi / 2],
[
0.5558, 28.2967, -1.3033, 1.4700,
2.2300, 1.4800, -1.5700 + np.pi / 2
],
[
-21.8230, 26.6690, -1.7361, 1.5600,
3.4800, 1.4000, -1.6900 + np.pi / 2
],
[
-8.1621, 31.3198, -1.6218, 1.7400,
3.7700, 1.4800, 2.7900 + np.pi / 2
]])
expected_tensor[:, -1:] = limit_period(
expected_tensor[:, -1:], period=np.pi * 2)
assert torch.allclose(expected_tensor, convert_depth_boxes.tensor, 1e-3)
# test DEPTH to CAM and LIDAR
......@@ -297,19 +327,25 @@ def test_boxes_conversion():
convert_cam_boxes = Coord3DMode.convert(depth_boxes, Coord3DMode.DEPTH,
Coord3DMode.CAM)
expected_tensor = torch.tensor(
[[1.7802, -1.7501, -2.5162, 1.7500, 1.6500, 3.3900, 1.4800],
[8.9594, -1.6357, -2.4567, 1.5400, 1.5700, 4.0100, 1.6200],
[28.2967, -1.3033, 0.5558, 1.4700, 1.4800, 2.2300, -1.5700],
[26.6690, -1.7361, -21.8230, 1.5600, 1.4000, 3.4800, -1.6900],
[31.3198, -1.6218, -8.1621, 1.7400, 1.4800, 3.7700, 2.7900]])
[[1.7802, -1.7501, -2.5162, 1.7500, 1.6500, 3.3900, -1.4800],
[8.9594, -1.6357, -2.4567, 1.5400, 1.5700, 4.0100, -1.6200],
[28.2967, -1.3033, 0.5558, 1.4700, 1.4800, 2.2300, 1.5700],
[26.6690, -1.7361, -21.8230, 1.5600, 1.4000, 3.4800, 1.6900],
[31.3198, -1.6218, -8.1621, 1.7400, 1.4800, 3.7700, -2.7900]])
assert torch.allclose(expected_tensor, convert_cam_boxes.tensor, 1e-3)
convert_lidar_boxes = Coord3DMode.convert(depth_boxes, Coord3DMode.DEPTH,
Coord3DMode.LIDAR)
expected_tensor = torch.tensor(
[[2.5162, -1.7802, -1.7501, 3.3900, 1.7500, 1.6500, 1.4800],
[2.4567, -8.9594, -1.6357, 4.0100, 1.5400, 1.5700, 1.6200],
[-0.5558, -28.2967, -1.3033, 2.2300, 1.4700, 1.4800, -1.5700],
[21.8230, -26.6690, -1.7361, 3.4800, 1.5600, 1.4000, -1.6900],
[8.1621, -31.3198, -1.6218, 3.7700, 1.7400, 1.4800, 2.7900]])
expected_tensor = torch.tensor([[
2.5162, -1.7802, -1.7501, 1.7500, 3.3900, 1.6500, 1.4800 - np.pi / 2
], [
2.4567, -8.9594, -1.6357, 1.5400, 4.0100, 1.5700, 1.6200 - np.pi / 2
], [
-0.5558, -28.2967, -1.3033, 1.4700, 2.2300, 1.4800, -1.5700 - np.pi / 2
], [
21.8230, -26.6690, -1.7361, 1.5600, 3.4800, 1.4000, -1.6900 - np.pi / 2
], [8.1621, -31.3198, -1.6218, 1.7400, 3.7700, 1.4800,
2.7900 - np.pi / 2]])
expected_tensor[:, -1:] = limit_period(
expected_tensor[:, -1:], period=np.pi * 2)
assert torch.allclose(expected_tensor, convert_lidar_boxes.tensor, 1e-3)
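# Illustrative sketch (hedged, not from the patch): with no explicit rt_mat,
# the LIDAR -> CAM case above reduces to the axis permutation
# x_cam = -y_lidar, y_cam = -z_lidar, z_cam = x_lidar, a swap of the last two
# dims (dy <-> dz) and the yaw mapping yaw_cam = -yaw_lidar - pi / 2, which
# limit_period then wraps into a single period.
import numpy as np

def lidar_box_to_cam_sketch(box):
    x, y, z, dx, dy, dz, yaw = box
    return np.array([-y, -z, x, dx, dz, dy, -yaw - np.pi / 2])

# lidar_box_to_cam_sketch(
#     [31.31978, 8.162144, -1.6217787, 1.74, 3.77, 1.48, 2.79])
# ~= [-8.1621, 1.6218, 31.3198, 1.74, 1.48, 3.77, -2.79 - pi / 2]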
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
from mmdet3d.core import draw_heatmap_gaussian
from mmdet3d.core import array_converter, draw_heatmap_gaussian
def test_gaussian():
......@@ -10,3 +12,169 @@ def test_gaussian():
radius = 2
draw_heatmap_gaussian(heatmap, ct_int, radius)
assert torch.isclose(torch.sum(heatmap), torch.tensor(4.3505), atol=1e-3)
def test_array_converter():
# to torch
@array_converter(to_torch=True, apply_to=('array_a', 'array_b'))
def test_func_1(array_a, array_b, container):
container.append(array_a)
container.append(array_b)
return array_a.clone(), array_b.clone()
np_array_a = np.array([0.0])
np_array_b = np.array([0.0])
container = []
new_array_a, new_array_b = test_func_1(np_array_a, np_array_b, container)
assert isinstance(new_array_a, np.ndarray)
assert isinstance(new_array_b, np.ndarray)
assert isinstance(container[0], torch.Tensor)
assert isinstance(container[1], torch.Tensor)
# one to torch and one not
@array_converter(to_torch=True, apply_to=('array_a', ))
def test_func_2(array_a, array_b):
return torch.cat([array_a, array_b])
with pytest.raises(TypeError):
_ = test_func_2(np_array_a, np_array_b)
# wrong template_arg_name_
@array_converter(
to_torch=True, apply_to=('array_a', ), template_arg_name_='array_c')
def test_func_3(array_a, array_b):
return torch.cat([array_a, array_b])
with pytest.raises(ValueError):
_ = test_func_3(np_array_a, np_array_b)
# wrong apply_to
@array_converter(to_torch=True, apply_to=('array_a', 'array_c'))
def test_func_4(array_a, array_b):
return torch.cat([array_a, array_b])
with pytest.raises(ValueError):
_ = test_func_4(np_array_a, np_array_b)
# to numpy
@array_converter(to_torch=False, apply_to=('array_a', 'array_b'))
def test_func_5(array_a, array_b, container):
container.append(array_a)
container.append(array_b)
return array_a.copy(), array_b.copy()
pt_array_a = torch.tensor([0.0])
pt_array_b = torch.tensor([0.0])
container = []
new_array_a, new_array_b = test_func_5(pt_array_a, pt_array_b, container)
assert isinstance(container[0], np.ndarray)
assert isinstance(container[1], np.ndarray)
assert isinstance(new_array_a, torch.Tensor)
assert isinstance(new_array_b, torch.Tensor)
# apply_to = None
@array_converter(to_torch=False)
def test_func_6(array_a, array_b, container):
container.append(array_a)
container.append(array_b)
return array_a.clone(), array_b.clone()
container = []
new_array_a, new_array_b = test_func_6(pt_array_a, pt_array_b, container)
assert isinstance(container[0], torch.Tensor)
assert isinstance(container[1], torch.Tensor)
assert isinstance(new_array_a, torch.Tensor)
assert isinstance(new_array_b, torch.Tensor)
# with default arg
@array_converter(to_torch=True, apply_to=('array_a', 'array_b'))
def test_func_7(array_a, container, array_b=np.array([2.])):
container.append(array_a)
container.append(array_b)
return array_a.clone(), array_b.clone()
container = []
new_array_a, new_array_b = test_func_7(np_array_a, container)
assert isinstance(container[0], torch.Tensor)
assert isinstance(container[1], torch.Tensor)
assert isinstance(new_array_a, np.ndarray)
assert isinstance(new_array_b, np.ndarray)
assert np.allclose(new_array_b, np.array([2.]), 1e-3)
# override default arg
container = []
new_array_a, new_array_b = test_func_7(np_array_a, container,
np.array([4.]))
assert isinstance(container[0], torch.Tensor)
assert isinstance(container[1], torch.Tensor)
assert isinstance(new_array_a, np.ndarray)
assert np.allclose(new_array_b, np.array([4.]), 1e-3)
# list arg
@array_converter(to_torch=True, apply_to=('array_a', 'array_b'))
def test_func_8(container, array_a, array_b=[2.]):
container.append(array_a)
container.append(array_b)
return array_a.clone(), array_b.clone()
container = []
new_array_a, new_array_b = test_func_8(container, [3.])
assert isinstance(container[0], torch.Tensor)
assert isinstance(container[1], torch.Tensor)
assert np.allclose(new_array_a, np.array([3.]), 1e-3)
assert np.allclose(new_array_b, np.array([2.]), 1e-3)
# number arg
@array_converter(to_torch=True, apply_to=('array_a', 'array_b'))
def test_func_9(container, array_a, array_b=1):
container.append(array_a)
container.append(array_b)
return array_a.clone(), array_b.clone()
container = []
new_array_a, new_array_b = test_func_9(container, np_array_a)
assert isinstance(container[0], torch.FloatTensor)
assert isinstance(container[1], torch.FloatTensor)
assert np.allclose(new_array_a, np_array_a, 1e-3)
assert np.allclose(new_array_b, np.array(1.0), 1e-3)
# feed kwargs
container = []
kwargs = {'array_a': [5.], 'array_b': [6.]}
new_array_a, new_array_b = test_func_8(container, **kwargs)
assert isinstance(container[0], torch.Tensor)
assert isinstance(container[1], torch.Tensor)
assert np.allclose(new_array_a, np.array([5.]), 1e-3)
assert np.allclose(new_array_b, np.array([6.]), 1e-3)
# feed args and kwargs
container = []
kwargs = {'array_b': [7.]}
args = (container, [8.])
new_array_a, new_array_b = test_func_8(*args, **kwargs)
assert isinstance(container[0], torch.Tensor)
assert isinstance(container[1], torch.Tensor)
assert np.allclose(new_array_a, np.array([8.]), 1e-3)
assert np.allclose(new_array_b, np.array([7.]), 1e-3)
# wrong template arg type
with pytest.raises(TypeError):
new_array_a, new_array_b = test_func_9(container, 3 + 4j)
with pytest.raises(TypeError):
new_array_a, new_array_b = test_func_9(container, {})
# invalid template arg list
with pytest.raises(TypeError):
new_array_a, new_array_b = test_func_9(container,
[True, np.array([3.0])])
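# Hedged usage sketch (the function name `shift_points` is illustrative, not
# from the patch; assumes the `np` and `array_converter` imports at the top
# of this file): the decorator lets a torch-based implementation accept numpy
# inputs transparently, converting the listed args to tensors inside and the
# result back to the caller's original array type, as the tests above show.
@array_converter(to_torch=True, apply_to=('points', ))
def shift_points(points, offset=1.0):
    return points + offset

shifted = shift_points(np.array([[1.0, 2.0]]))  # returns an np.ndarray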
......@@ -206,7 +206,7 @@ parser.add_argument(
'--out-dir',
type=str,
default='./data/kitti',
required='False',
required=False,
help='name of info pkl')
parser.add_argument('--extra-tag', type=str, default='kitti')
parser.add_argument(
......
......@@ -5,7 +5,7 @@ from collections import OrderedDict
from nuscenes.utils.geometry_utils import view_points
from pathlib import Path
from mmdet3d.core.bbox import box_np_ops
from mmdet3d.core.bbox import box_np_ops, points_cam2img
from .kitti_data_utils import get_kitti_image_info, get_waymo_image_info
from .nuscenes_converter import post_process_coords
......@@ -471,7 +471,7 @@ def get_2d_boxes(info, occluded, mono3d=True):
repro_rec['velo_cam3d'] = -1 # no velocity in KITTI
center3d = np.array(loc).reshape([1, 3])
center2d = box_np_ops.points_cam2img(
center2d = points_cam2img(
center3d, camera_intrinsic, with_depth=True)
repro_rec['center2d'] = center2d.squeeze().tolist()
# normalized center2D + depth
......
......@@ -192,8 +192,10 @@ def _fill_trainval_infos(lyft,
names[i] = LyftDataset.NameMapping[names[i]]
names = np.array(names)
# we need to convert rot to SECOND format.
gt_boxes = np.concatenate([locs, dims, -rots - np.pi / 2], axis=1)
# we need to convert box size to
# the format of our lidar coordinate system
# which is dx, dy, dz (corresponding to l, w, h)
gt_boxes = np.concatenate([locs, dims[:, [1, 0, 2]], rots], axis=1)
assert len(gt_boxes) == len(
annotations), f'{len(gt_boxes)}, {len(annotations)}'
info['gt_boxes'] = gt_boxes
......
......@@ -10,7 +10,7 @@ from pyquaternion import Quaternion
from shapely.geometry import MultiPoint, box
from typing import List, Tuple, Union
from mmdet3d.core.bbox.box_np_ops import points_cam2img
from mmdet3d.core.bbox import points_cam2img
from mmdet3d.datasets import NuScenesDataset
nus_categories = ('car', 'truck', 'trailer', 'bus', 'construction_vehicle',
......@@ -249,8 +249,10 @@ def _fill_trainval_infos(nusc,
if names[i] in NuScenesDataset.NameMapping:
names[i] = NuScenesDataset.NameMapping[names[i]]
names = np.array(names)
# we need to convert rot to SECOND format.
gt_boxes = np.concatenate([locs, dims, -rots - np.pi / 2], axis=1)
# we need to convert box size to
# the format of our lidar coordinate system
# which is dx, dy, dz (corresponding to l, w, h)
gt_boxes = np.concatenate([locs, dims[:, [1, 0, 2]], rots], axis=1)
assert len(gt_boxes) == len(
annotations), f'{len(gt_boxes)}, {len(annotations)}'
info['gt_boxes'] = gt_boxes
......
......@@ -42,18 +42,17 @@ class SUNRGBDInstance(object):
self.ymax = data[2] + data[4]
self.box2d = np.array([self.xmin, self.ymin, self.xmax, self.ymax])
self.centroid = np.array([data[5], data[6], data[7]])
self.w = data[8]
self.l = data[9] # noqa: E741
self.h = data[10]
# data[9] is dx (l), data[8] is dy (w), data[10] is dz (h)
# in our depth coordinate system,
# l corresponds to the size along the x axis
self.size = np.array([data[9], data[8], data[10]]) * 2
self.orientation = np.zeros((3, ))
self.orientation[0] = data[11]
self.orientation[1] = data[12]
self.heading_angle = -1 * np.arctan2(self.orientation[1],
self.orientation[0])
self.box3d = np.concatenate([
self.centroid,
np.array([self.l * 2, self.w * 2, self.h * 2, self.heading_angle])
])
self.heading_angle = np.arctan2(self.orientation[1],
self.orientation[0])
self.box3d = np.concatenate(
[self.centroid, self.size, self.heading_angle[None]])
class SUNRGBDData(object):
......