Unverified commit eb5a66ec authored by yinchimaoliang, committed by GitHub

Add core unittests (#30)

* Add merge_aug_bboxes_3d unittest

* Add voxel_generator unittest

* Change test_merge_augs

* Add clean_data unittest

* Finish eval_class unittest

* Add kitti_eval unittest

* Add do_eval unittest

* Add gpu judgement for do_eval

* Change test_kitti_eval and test_voxel_generator

* Change to isclose

* Add unittests for bbox transform

* Add unittests for bbox transform

* Add unittests for bbox transform

* Add decode

* Add pred_split unittest

* Change allclose to eq
parent 6c63a681
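
The "Change to isclose" and "Change allclose to eq" commits above follow a general rule for numeric assertions; a minimal illustrative sketch (not part of this diff), assuming only numpy and torch:

import numpy as np
import torch

# floating-point results carry rounding error, so compare with a tolerance
assert np.isclose(2.0 / 3.0, 0.666667, atol=1e-4)
assert torch.allclose(torch.tensor([0.1]) + torch.tensor([0.2]),
                      torch.tensor([0.3]))

# integer outputs such as class labels are exact, so strict equality is safer
assert torch.all(torch.eq(torch.tensor([1, 2, 3]), torch.tensor([1, 2, 3])))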
@@ -6,9 +6,43 @@ CommandLine:
"""
import torch
from mmdet3d.core.anchor import build_anchor_generator
def test_anchor_3d_range_generator():
if torch.cuda.is_available():
device = 'cuda'
else:
device = 'cpu'
anchor_generator_cfg = dict(
type='Anchor3DRangeGenerator',
ranges=[
[0, -39.68, -0.6, 70.4, 39.68, -0.6],
[0, -39.68, -0.6, 70.4, 39.68, -0.6],
[0, -39.68, -1.78, 70.4, 39.68, -1.78],
],
sizes=[[0.6, 0.8, 1.73], [0.6, 1.76, 1.73], [1.6, 3.9, 1.56]],
rotations=[0, 1.57],
reshape_out=False)
anchor_generator = build_anchor_generator(anchor_generator_cfg)
repr_str = repr(anchor_generator)
expected_repr_str = 'Anchor3DRangeGenerator(anchor_range=' \
'[[0, -39.68, -0.6, 70.4, 39.68, -0.6], ' \
'[0, -39.68, -0.6, 70.4, 39.68, -0.6], ' \
'[0, -39.68, -1.78, 70.4, 39.68, -1.78]],' \
'\nscales=[1],\nsizes=[[0.6, 0.8, 1.73], ' \
'[0.6, 1.76, 1.73], [1.6, 3.9, 1.56]],' \
'\nrotations=[0, 1.57],\nreshape_out=False,' \
'\nsize_per_range=True)'
assert repr_str == expected_repr_str
featmap_size = (256, 256)
mr_anchors = anchor_generator.single_level_grid_anchors(
featmap_size, 1.1, device=device)
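    # expected layout: (1, feat_h, feat_w, num_sizes=3, num_rotations=2,
    # 7 box params), i.e. one anchor per (size, rotation) pair per location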
assert mr_anchors.shape == torch.Size([1, 256, 256, 3, 2, 7])

def test_aligned_anchor_generator():
    from mmdet3d.core.anchor import build_anchor_generator
    if torch.cuda.is_available():
        device = 'cuda'
    else:
...
@@ -5,13 +5,58 @@ import unittest
from mmdet3d.core.bbox import (BaseInstance3DBoxes, Box3DMode,
                               CameraInstance3DBoxes, DepthInstance3DBoxes,
                               LiDARInstance3DBoxes, bbox3d2roi,
                               bbox3d_mapping_back)
from mmdet3d.core.bbox.structures.utils import (get_box_type, limit_period,
                                                 points_cam2img,
                                                 rotation_3d_in_axis,
                                                 xywhr2xyxyr)
def test_bbox3d_mapping_back():
bboxes = BaseInstance3DBoxes(
[[
-5.24223238e+00, 4.00209696e+01, 2.97570381e-01, 2.06200000e+00,
4.40900000e+00, 1.54800000e+00, -1.48801203e+00
],
[
-2.66751588e+01, 5.59499564e+00, -9.14345860e-01, 3.43000000e-01,
4.58000000e-01, 7.82000000e-01, -4.62759755e+00
],
[
-5.80979675e+00, 3.54092357e+01, 2.00889888e-01, 2.39600000e+00,
3.96900000e+00, 1.73200000e+00, -4.65203216e+00
],
[
-3.13086877e+01, 1.09007628e+00, -1.94612112e-01, 1.94400000e+00,
3.85700000e+00, 1.72300000e+00, -2.81427027e+00
]])
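    # map the boxes back to the original point cloud frame: scale by
    # 1 / 1.1 and undo the horizontal and vertical flips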
new_bboxes = bbox3d_mapping_back(bboxes, 1.1, True, True)
expected_new_bboxes = torch.tensor(
[[-4.7657, 36.3827, 0.2705, 1.8745, 4.0082, 1.4073, -1.4880],
[-24.2501, 5.0864, -0.8312, 0.3118, 0.4164, 0.7109, -4.6276],
[-5.2816, 32.1902, 0.1826, 2.1782, 3.6082, 1.5745, -4.6520],
[-28.4624, 0.9910, -0.1769, 1.7673, 3.5064, 1.5664, -2.8143]])
assert torch.allclose(new_bboxes.tensor, expected_new_bboxes, atol=1e-4)
def test_bbox3d2roi():
bbox_0 = torch.tensor(
[[-5.2422, 4.0020, 2.9757, 2.0620, 4.4090, 1.5480, -1.4880],
[-5.8097, 3.5409, 2.0088, 2.3960, 3.9690, 1.7320, -4.6520]])
bbox_1 = torch.tensor(
[[-2.6675, 5.5949, -9.1434, 3.4300, 4.5800, 7.8200, -4.6275],
[-3.1308, 1.0900, -1.9461, 1.9440, 3.8570, 1.7230, -2.8142]])
bbox_list = [bbox_0, bbox_1]
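    # bbox3d2roi concatenates the boxes from each sample and prepends the
    # batch index as the first column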
rois = bbox3d2roi(bbox_list)
expected_rois = torch.tensor(
[[0.0000, -5.2422, 4.0020, 2.9757, 2.0620, 4.4090, 1.5480, -1.4880],
[0.0000, -5.8097, 3.5409, 2.0088, 2.3960, 3.9690, 1.7320, -4.6520],
[1.0000, -2.6675, 5.5949, -9.1434, 3.4300, 4.5800, 7.8200, -4.6275],
[1.0000, -3.1308, 1.0900, -1.9461, 1.9440, 3.8570, 1.7230, -2.8142]])
assert torch.all(torch.eq(rois, expected_rois))

def test_base_boxes3d():
    # test empty initialization
    empty_boxes = []
...
import torch
from mmdet3d.core.bbox import DepthInstance3DBoxes
from mmdet.core import build_bbox_coder
def test_partial_bin_based_box_coder():
box_coder_cfg = dict(
type='PartialBinBasedBBoxCoder',
num_sizes=10,
num_dir_bins=12,
with_rot=True,
mean_sizes=[[2.114256, 1.620300, 0.927272],
[0.791118, 1.279516, 0.718182],
[0.923508, 1.867419, 0.845495],
[0.591958, 0.552978, 0.827272],
[0.699104, 0.454178, 0.75625],
[0.69519, 1.346299, 0.736364],
[0.528526, 1.002642, 1.172878],
[0.500618, 0.632163, 0.683424],
[0.404671, 1.071108, 1.688889],
[0.76584, 1.398258, 0.472728]])
box_coder = build_bbox_coder(box_coder_cfg)
    # test encode
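    # encode() returns the box gravity centers, the size-class index with
    # residuals against that class's mean size, and the direction bin with
    # its angle residual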
gt_bboxes = DepthInstance3DBoxes(
[[0.8308, 4.1168, -1.2035, 2.2493, 1.8444, 1.9245, 1.6486],
[2.3002, 4.8149, -1.2442, 0.5718, 0.8629, 0.9510, 1.6030],
[-1.1477, 1.8090, -1.1725, 0.6965, 1.5273, 2.0563, 0.0552]])
gt_labels = torch.tensor([0, 1, 2])
center_target, size_class_target, size_res_target, dir_class_target, \
dir_res_target = box_coder.encode(gt_bboxes, gt_labels)
expected_center_target = torch.tensor([[0.8308, 4.1168, -0.2413],
[2.3002, 4.8149, -0.7687],
[-1.1477, 1.8090, -0.1444]])
expected_size_class_target = torch.tensor([0, 1, 2])
expected_size_res_target = torch.tensor([[0.1350, 0.2241, 0.9972],
[-0.2193, -0.4166, 0.2328],
[-0.2270, -0.3401, 1.2108]])
expected_dir_class_target = torch.tensor([3, 3, 0])
expected_dir_res_target = torch.tensor([0.0778, 0.0322, 0.0552])
assert torch.allclose(center_target, expected_center_target, atol=1e-4)
assert torch.all(size_class_target == expected_size_class_target)
assert torch.allclose(size_res_target, expected_size_res_target, atol=1e-4)
assert torch.all(dir_class_target == expected_dir_class_target)
assert torch.allclose(dir_res_target, expected_dir_res_target, atol=1e-4)
# test decode
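    # decode() takes the argmax size and direction bins and adds their
    # residuals back to recover (x, y, z, dx, dy, dz, yaw) boxes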
    center = torch.tensor([[[0.8014, 3.4134, -0.6133],
                            [2.6375, 8.4191, 2.0438],
                            [4.2017, 5.2504, -0.7851],
                            [-1.0088, 5.4107, 1.6293],
                            [1.4837, 4.0268, 0.6222]]])
size_class = torch.tensor([[[
-1.0061, -2.2788, 1.1322, -4.4380, -11.0526, -2.8113, -2.0642, -7.5886,
-4.8627, -5.0437
],
[
-2.2058, -0.3527, -1.9976, 0.8815, -2.7980,
-1.9053, -0.5097, -2.0232, -1.4242, -4.1192
],
[
-1.4783, -0.1009, -1.1537, 0.3052, -4.3147,
-2.6529, 0.2729, -0.3755, -2.6479, -3.7548
],
[
-6.1809, -3.5024, -8.3273, 1.1252, -4.3315,
-7.8288, -4.6091, -5.8153, 0.7480, -10.1396
],
[
-9.0424, -3.7883, -6.0788, -1.8855,
-10.2493, -9.7164, -1.0658, -4.1713,
1.1173, -10.6204
]]])
size_res = torch.tensor([[[[-9.8976e-02, -5.2152e-01, -7.6421e-02],
[1.4593e-01, 5.6099e-01, 8.9421e-02],
[5.1481e-02, 3.9280e-01, 1.2705e-01],
[3.6869e-01, 7.0558e-01, 1.4647e-01],
[4.7683e-01, 3.3644e-01, 2.3481e-01],
[8.7346e-02, 8.4987e-01, 3.3265e-01],
[2.1393e-01, 8.5585e-01, 9.8948e-02],
[7.8530e-02, 5.9694e-02, -8.7211e-02],
[1.8551e-01, 1.1308e+00, -5.1864e-01],
[3.6485e-01, 7.3757e-01, 1.5264e-01]],
[[-9.5593e-01, -5.0455e-01, 1.9554e-01],
[-1.0870e-01, 1.8025e-01, 1.0228e-01],
[-8.2882e-02, -4.3771e-01, 9.2135e-02],
[-4.0840e-02, -5.9841e-02, 1.1982e-01],
[7.3448e-02, 5.2045e-02, 1.7301e-01],
[-4.0440e-02, 4.9532e-02, 1.1266e-01],
[3.5857e-02, 1.3564e-02, 1.0212e-01],
[-1.0407e-01, -5.9321e-02, 9.2622e-02],
[7.4691e-03, 9.3080e-02, -4.4077e-01],
[-6.0121e-02, -1.3381e-01, -6.8083e-02]],
[[-9.3970e-01, -9.7823e-01, -5.1075e-02],
[-1.2843e-01, -1.8381e-01, 7.1327e-02],
[-1.2247e-01, -8.1115e-01, 3.6495e-02],
[4.9154e-02, -4.5440e-02, 8.9520e-02],
[1.5653e-01, 3.5990e-02, 1.6414e-01],
[-5.9621e-02, 4.9357e-03, 1.4264e-01],
[8.5235e-04, -1.0030e-01, -3.0712e-02],
[-3.7255e-02, 2.8996e-02, 5.5545e-02],
[3.9298e-02, -4.7420e-02, -4.9147e-01],
[-1.1548e-01, -1.5895e-01, -3.9155e-02]],
[[-1.8725e+00, -7.4102e-01, 1.0524e+00],
[-3.3210e-01, 4.7828e-02, -3.2666e-02],
[-2.7949e-01, 5.5541e-02, -1.0059e-01],
[-8.5533e-02, 1.4870e-01, -1.6709e-01],
[3.8283e-01, 2.6609e-01, 2.1361e-01],
[-4.2156e-01, 3.2455e-01, 6.7309e-01],
[-2.4336e-02, -8.3366e-02, 3.9913e-01],
[8.2142e-03, 4.8323e-02, -1.5247e-01],
[-4.8142e-02, -3.0074e-01, -1.6829e-01],
[1.3274e-01, -2.3825e-01, -1.8127e-01]],
[[-1.2576e+00, -6.1550e-01, 7.9430e-01],
[-4.7222e-01, 1.5634e+00, -5.9460e-02],
[-3.5367e-01, 1.3616e+00, -1.6421e-01],
[-1.6611e-02, 2.4231e-01, -9.6188e-02],
[5.4486e-01, 4.6833e-01, 5.1151e-01],
[-6.1755e-01, 1.0292e+00, 1.2458e+00],
[-6.8152e-02, 2.4786e-01, 9.5088e-01],
[-4.8745e-02, 1.5134e-01, -9.9962e-02],
[2.4485e-03, -7.5991e-02, 1.3545e-01],
[4.1608e-01, -1.2093e-01, -3.1643e-01]]]])
dir_class = torch.tensor([[[
-1.0230, -5.1965, -5.2195, 2.4030, -2.7661, -7.3399, -1.1640, -4.0630,
-5.2940, 0.8245, -3.1869, -6.1743
],
[
-1.9503, -1.6940, -0.8716, -1.1494, -0.8196,
0.2862, -0.2921, -0.7894, -0.2481, -0.9916,
-1.4304, -1.2466
],
[
-1.7435, -1.2043, -0.1265, 0.5083, -0.0717,
-0.9560, -1.6171, -2.6463, -2.3863, -2.1358,
-1.8812, -2.3117
],
[
-1.9282, 0.3792, -1.8426, -1.4587, -0.8582,
-3.4639, -3.2133, -3.7867, -7.6781, -6.4459,
-6.2455, -5.4797
],
[
-3.1869, 0.4456, -0.5824, 0.9994, -1.0554,
-8.4232, -7.7019, -7.1382, -10.2724,
-7.8229, -8.1860, -8.6194
]]])
dir_res = torch.tensor(
[[[
1.1022e-01, -2.3750e-01, 2.0381e-01, 1.2177e-01, -2.8501e-01,
1.5351e-01, 1.2218e-01, -2.0677e-01, 1.4468e-01, 1.1593e-01,
-2.6864e-01, 1.1290e-01
],
[
-1.5788e-02, 4.1538e-02, -2.2857e-04, -1.4011e-02, 4.2560e-02,
-3.1186e-03, -5.0343e-02, 6.8110e-03, -2.6728e-02, -3.2781e-02,
3.6889e-02, -1.5609e-03
],
[
1.9004e-02, 5.7105e-03, 6.0329e-02, 1.3074e-02, -2.5546e-02,
-1.1456e-02, -3.2484e-02, -3.3487e-02, 1.6609e-03, 1.7095e-02,
1.2647e-05, 2.4814e-02
],
[
1.4482e-01, -6.3083e-02, 5.8307e-02, 9.1396e-02, -8.4571e-02,
4.5890e-02, 5.6243e-02, -1.2448e-01, -9.5244e-02, 4.5746e-02,
-1.7390e-02, 9.0267e-02
],
[
1.8065e-01, -2.0078e-02, 8.5401e-02, 1.0784e-01, -1.2495e-01,
2.2796e-02, 1.1310e-01, -8.4364e-02, -1.1904e-01, 6.1180e-02,
-1.8109e-02, 1.1229e-01
]]])
bbox_out = dict(
center=center,
size_class=size_class,
size_res=size_res,
dir_class=dir_class,
dir_res=dir_res)
bbox3d = box_coder.decode(bbox_out)
expected_bbox3d = torch.tensor(
[[[0.8014, 3.4134, -0.6133, 0.9750, 2.2602, 0.9725, 1.6926],
[2.6375, 8.4191, 2.0438, 0.5511, 0.4931, 0.9471, 2.6149],
[4.2017, 5.2504, -0.7851, 0.6411, 0.5075, 0.9168, 1.5839],
[-1.0088, 5.4107, 1.6293, 0.5064, 0.7017, 0.6602, 0.4605],
[1.4837, 4.0268, 0.6222, 0.4071, 0.9951, 1.8243, 1.6786]]])
assert torch.allclose(bbox3d, expected_bbox3d, atol=1e-4)
# test split_pred
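    # 79 channels per proposal: 2 objectness + 3 center + 12 dir class +
    # 12 dir residual + 10 size class + 30 size residual (10 classes x 3
    # dims) + 10 semantic scores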
box_preds = torch.rand(2, 79, 256)
base_xyz = torch.rand(2, 256, 3)
results = box_coder.split_pred(box_preds, base_xyz)
obj_scores = results['obj_scores']
center = results['center']
dir_class = results['dir_class']
dir_res_norm = results['dir_res_norm']
dir_res = results['dir_res']
size_class = results['size_class']
size_res_norm = results['size_res_norm']
size_res = results['size_res']
sem_scores = results['sem_scores']
assert obj_scores.shape == torch.Size([2, 256, 2])
assert center.shape == torch.Size([2, 256, 3])
assert dir_class.shape == torch.Size([2, 256, 12])
assert dir_res_norm.shape == torch.Size([2, 256, 12])
assert dir_res.shape == torch.Size([2, 256, 12])
assert size_class.shape == torch.Size([2, 256, 10])
assert size_res_norm.shape == torch.Size([2, 256, 10, 3])
assert size_res.shape == torch.Size([2, 256, 10, 3])
assert sem_scores.shape == torch.Size([2, 256, 10])
@@ -123,16 +123,15 @@ def test_indoor_eval():
        box_type_3d=DepthInstance3DBoxes,
        box_mode_3d=Box3DMode.DEPTH)
    assert np.isclose(ret_value['cabinet_AP_0.25'], 0.666667)
    assert np.isclose(ret_value['bed_AP_0.25'], 1.0)
    assert np.isclose(ret_value['chair_AP_0.25'], 0.5)
    assert np.isclose(ret_value['mAP_0.25'], 0.708333)
    assert np.isclose(ret_value['mAR_0.25'], 0.833333)


def test_average_precision():
    ap = average_precision(
        np.array([[0.25, 0.5, 0.75], [0.25, 0.5, 0.75]]),
        np.array([[1., 1., 1.], [1., 1., 1.]]), '11points')
    assert abs(ap[0] - 0.06611571) < 0.001
import numpy as np
import pytest
import torch
from mmdet3d.core.evaluation.kitti_utils.eval import (do_eval, eval_class,
kitti_eval)
def test_do_eval():
if not torch.cuda.is_available():
pytest.skip('test requires GPU and CUDA')
gt_name = np.array(
['Pedestrian', 'Cyclist', 'Car', 'Car', 'Car', 'DontCare', 'DontCare'])
gt_truncated = np.array([0., 0., 0., -1., -1., -1., -1.])
gt_occluded = np.array([0, 0, 3, -1, -1, -1, -1])
gt_alpha = np.array([-1.57, 1.85, -1.65, -10., -10., -10., -10.])
gt_bbox = np.array([[674.9179, 165.48549, 693.23694, 193.42134],
[676.21954, 165.70988, 691.63745, 193.83748],
[389.4093, 182.48041, 421.49072, 202.13422],
[232.0577, 186.16724, 301.94623, 217.4024],
[758.6537, 172.98509, 816.32434, 212.76743],
[532.37, 176.35, 542.68, 185.27],
[559.62, 175.83, 575.4, 183.15]])
gt_dimensions = np.array([[12.34, 2.85, 2.63], [3.69, 1.67, 1.87],
[2.02, 1.86, 0.6], [-1., -1., -1.],
[-1., -1., -1.], [-1., -1., -1.],
[-1., -1., -1.]])
gt_location = np.array([[4.700e-01, 1.490e+00, 6.944e+01],
[-1.653e+01, 2.390e+00, 5.849e+01],
[4.590e+00, 1.320e+00, 4.584e+01],
[-1.000e+03, -1.000e+03, -1.000e+03],
[-1.000e+03, -1.000e+03, -1.000e+03],
[-1.000e+03, -1.000e+03, -1.000e+03],
[-1.000e+03, -1.000e+03, -1.000e+03]])
gt_rotation_y = [-1.56, 1.57, -1.55, -10., -10., -10., -10.]
gt_anno = dict(
name=gt_name,
truncated=gt_truncated,
occluded=gt_occluded,
alpha=gt_alpha,
bbox=gt_bbox,
dimensions=gt_dimensions,
location=gt_location,
rotation_y=gt_rotation_y)
dt_name = np.array(['Pedestrian', 'Cyclist', 'Car', 'Car', 'Car'])
dt_truncated = np.array([0., 0., 0., 0., 0.])
dt_occluded = np.array([0, 0, 0, 0, 0])
dt_alpha = np.array([1.0744612, 1.2775835, 1.82563, 2.1145396, -1.7676563])
dt_dimensions = np.array([[1.4441837, 1.7450154, 0.53160036],
[1.6501029, 1.7540325, 0.5162356],
[3.9313498, 1.4899347, 1.5655756],
[4.0111866, 1.5350999, 1.585221],
[3.7337692, 1.5117968, 1.5515774]])
dt_location = np.array([[4.6671643, 1.285098, 45.836895],
[4.658241, 1.3088846, 45.85148],
[-16.598526, 2.298814, 58.618088],
[-18.629122, 2.2990575, 39.305355],
[7.0964046, 1.5178275, 29.32426]])
dt_rotation_y = np.array(
[1.174933, 1.3778262, 1.550529, 1.6742425, -1.5330327])
dt_bbox = np.array([[674.9179, 165.48549, 693.23694, 193.42134],
[676.21954, 165.70988, 691.63745, 193.83748],
[389.4093, 182.48041, 421.49072, 202.13422],
[232.0577, 186.16724, 301.94623, 217.4024],
[758.6537, 172.98509, 816.32434, 212.76743]])
dt_score = np.array(
[0.18151495, 0.57920843, 0.27795696, 0.23100418, 0.21541929])
dt_anno = dict(
name=dt_name,
truncated=dt_truncated,
occluded=dt_occluded,
alpha=dt_alpha,
bbox=dt_bbox,
dimensions=dt_dimensions,
location=dt_location,
rotation_y=dt_rotation_y,
score=dt_score)
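    # class ids follow kitti_eval's convention: 0 = Car, 1 = Pedestrian,
    # 2 = Cyclist; min_overlaps stacks two threshold sets over the
    # (bbox, bev, 3d) metrics, with columns ordered like current_classes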
current_classes = [1, 2, 0]
min_overlaps = np.array([[[0.5, 0.5, 0.7], [0.5, 0.5, 0.7],
[0.5, 0.5, 0.7]],
[[0.5, 0.5, 0.7], [0.25, 0.25, 0.5],
[0.25, 0.25, 0.5]]])
eval_types = ['bbox', 'bev', '3d', 'aos']
mAP_bbox, mAP_bev, mAP_3d, mAP_aos = do_eval([gt_anno], [dt_anno],
current_classes, min_overlaps,
eval_types)
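    # each returned mAP array is indexed as (class, difficulty, threshold)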
expected_mAP_bbox = np.array([[[0., 0.], [9.09090909, 9.09090909],
[9.09090909, 9.09090909]],
[[0., 0.], [9.09090909, 9.09090909],
[9.09090909, 9.09090909]],
[[0., 0.], [9.09090909, 9.09090909],
[9.09090909, 9.09090909]]])
expected_mAP_bev = np.array([[[0., 0.], [0., 0.], [0., 0.]],
[[0., 0.], [0., 0.], [0., 0.]],
[[0., 0.], [0., 0.], [0., 0.]]])
expected_mAP_3d = np.array([[[0., 0.], [0., 0.], [0., 0.]],
[[0., 0.], [0., 0.], [0., 0.]],
[[0., 0.], [0., 0.], [0., 0.]]])
expected_mAP_aos = np.array([[[0., 0.], [0.55020816, 0.55020816],
[0.55020816, 0.55020816]],
[[0., 0.], [8.36633862, 8.36633862],
[8.36633862, 8.36633862]],
[[0., 0.], [8.63476893, 8.63476893],
[8.63476893, 8.63476893]]])
assert np.allclose(mAP_bbox, expected_mAP_bbox)
assert np.allclose(mAP_bev, expected_mAP_bev)
assert np.allclose(mAP_3d, expected_mAP_3d)
assert np.allclose(mAP_aos, expected_mAP_aos)
def test_kitti_eval():
if not torch.cuda.is_available():
pytest.skip('test requires GPU and CUDA')
gt_name = np.array(
['Pedestrian', 'Cyclist', 'Car', 'Car', 'Car', 'DontCare', 'DontCare'])
gt_truncated = np.array([0., 0., 0., -1., -1., -1., -1.])
gt_occluded = np.array([0, 0, 3, -1, -1, -1, -1])
gt_alpha = np.array([-1.57, 1.85, -1.65, -10., -10., -10., -10.])
gt_bbox = np.array([[674.9179, 165.48549, 693.23694, 193.42134],
[676.21954, 165.70988, 691.63745, 193.83748],
[389.4093, 182.48041, 421.49072, 202.13422],
[232.0577, 186.16724, 301.94623, 217.4024],
[758.6537, 172.98509, 816.32434, 212.76743],
[532.37, 176.35, 542.68, 185.27],
[559.62, 175.83, 575.4, 183.15]])
gt_dimensions = np.array([[12.34, 2.85, 2.63], [3.69, 1.67, 1.87],
[2.02, 1.86, 0.6], [-1., -1., -1.],
[-1., -1., -1.], [-1., -1., -1.],
[-1., -1., -1.]])
gt_location = np.array([[4.700e-01, 1.490e+00, 6.944e+01],
[-1.653e+01, 2.390e+00, 5.849e+01],
[4.590e+00, 1.320e+00, 4.584e+01],
[-1.000e+03, -1.000e+03, -1.000e+03],
[-1.000e+03, -1.000e+03, -1.000e+03],
[-1.000e+03, -1.000e+03, -1.000e+03],
[-1.000e+03, -1.000e+03, -1.000e+03]])
gt_rotation_y = [-1.56, 1.57, -1.55, -10., -10., -10., -10.]
gt_anno = dict(
name=gt_name,
truncated=gt_truncated,
occluded=gt_occluded,
alpha=gt_alpha,
bbox=gt_bbox,
dimensions=gt_dimensions,
location=gt_location,
rotation_y=gt_rotation_y)
dt_name = np.array(['Pedestrian', 'Cyclist', 'Car', 'Car', 'Car'])
dt_truncated = np.array([0., 0., 0., 0., 0.])
dt_occluded = np.array([0, 0, 0, 0, 0])
dt_alpha = np.array([1.0744612, 1.2775835, 1.82563, 2.1145396, -1.7676563])
dt_dimensions = np.array([[1.4441837, 1.7450154, 0.53160036],
[1.6501029, 1.7540325, 0.5162356],
[3.9313498, 1.4899347, 1.5655756],
[4.0111866, 1.5350999, 1.585221],
[3.7337692, 1.5117968, 1.5515774]])
dt_location = np.array([[4.6671643, 1.285098, 45.836895],
[4.658241, 1.3088846, 45.85148],
[-16.598526, 2.298814, 58.618088],
[-18.629122, 2.2990575, 39.305355],
[7.0964046, 1.5178275, 29.32426]])
dt_rotation_y = np.array(
[1.174933, 1.3778262, 1.550529, 1.6742425, -1.5330327])
dt_bbox = np.array([[674.9179, 165.48549, 693.23694, 193.42134],
[676.21954, 165.70988, 691.63745, 193.83748],
[389.4093, 182.48041, 421.49072, 202.13422],
[232.0577, 186.16724, 301.94623, 217.4024],
[758.6537, 172.98509, 816.32434, 212.76743]])
dt_score = np.array(
[0.18151495, 0.57920843, 0.27795696, 0.23100418, 0.21541929])
dt_anno = dict(
name=dt_name,
truncated=dt_truncated,
occluded=dt_occluded,
alpha=dt_alpha,
bbox=dt_bbox,
dimensions=dt_dimensions,
location=dt_location,
rotation_y=dt_rotation_y,
score=dt_score)
current_classes = [1, 2, 0]
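    # kitti_eval runs with its default overlap thresholds and returns a
    # formatted result string plus a dict of metrics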
result, ret_dict = kitti_eval([gt_anno], [dt_anno], current_classes)
assert np.isclose(ret_dict['KITTI/Overall_2D_moderate'], 9.090909090909092)
assert np.isclose(ret_dict['KITTI/Overall_2D_hard'], 9.090909090909092)
def test_eval_class():
gt_name = np.array(
['Pedestrian', 'Cyclist', 'Car', 'Car', 'Car', 'DontCare', 'DontCare'])
gt_truncated = np.array([0., 0., 0., -1., -1., -1., -1.])
gt_occluded = np.array([0, 0, 3, -1, -1, -1, -1])
gt_alpha = np.array([-1.57, 1.85, -1.65, -10., -10., -10., -10.])
gt_bbox = np.array([[674.9179, 165.48549, 693.23694, 193.42134],
[676.21954, 165.70988, 691.63745, 193.83748],
[389.4093, 182.48041, 421.49072, 202.13422],
[232.0577, 186.16724, 301.94623, 217.4024],
[758.6537, 172.98509, 816.32434, 212.76743],
[532.37, 176.35, 542.68, 185.27],
[559.62, 175.83, 575.4, 183.15]])
gt_anno = dict(
name=gt_name,
truncated=gt_truncated,
occluded=gt_occluded,
alpha=gt_alpha,
bbox=gt_bbox)
dt_name = np.array(['Pedestrian', 'Cyclist', 'Car', 'Car', 'Car'])
dt_truncated = np.array([0., 0., 0., 0., 0.])
dt_occluded = np.array([0, 0, 0, 0, 0])
dt_alpha = np.array([1.0744612, 1.2775835, 1.82563, 2.1145396, -1.7676563])
dt_bbox = np.array([[674.9179, 165.48549, 693.23694, 193.42134],
[676.21954, 165.70988, 691.63745, 193.83748],
[389.4093, 182.48041, 421.49072, 202.13422],
[232.0577, 186.16724, 301.94623, 217.4024],
[758.6537, 172.98509, 816.32434, 212.76743]])
dt_score = np.array(
[0.18151495, 0.57920843, 0.27795696, 0.23100418, 0.21541929])
dt_anno = dict(
name=dt_name,
truncated=dt_truncated,
occluded=dt_occluded,
alpha=dt_alpha,
bbox=dt_bbox,
score=dt_score)
current_classes = [1, 2, 0]
difficultys = [0, 1, 2]
metric = 0
min_overlaps = np.array([[[0.5, 0.5, 0.7], [0.5, 0.5, 0.7],
[0.5, 0.5, 0.7]],
[[0.5, 0.5, 0.7], [0.25, 0.25, 0.5],
[0.25, 0.25, 0.5]]])
ret_dict = eval_class([gt_anno], [dt_anno], current_classes, difficultys,
metric, min_overlaps, True, 1)
recall_sum = np.sum(ret_dict['recall'])
precision_sum = np.sum(ret_dict['precision'])
orientation_sum = np.sum(ret_dict['orientation'])
assert np.isclose(recall_sum, 16)
assert np.isclose(precision_sum, 16)
assert np.isclose(orientation_sum, 10.252829201850309)
import mmcv
import pytest
import torch
from mmdet3d.core import merge_aug_bboxes_3d
from mmdet3d.core.bbox import DepthInstance3DBoxes
def test_merge_aug_bboxes_3d():
if not torch.cuda.is_available():
pytest.skip('test requires GPU and torch+cuda')
img_meta_0 = dict(
pcd_horizontal_flip=False,
pcd_vertical_flip=True,
pcd_scale_factor=1.0)
img_meta_1 = dict(
pcd_horizontal_flip=True,
pcd_vertical_flip=False,
pcd_scale_factor=1.0)
img_meta_2 = dict(
pcd_horizontal_flip=False,
pcd_vertical_flip=False,
pcd_scale_factor=0.5)
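    # three test-time augmentations: a vertical flip, a horizontal flip and
    # 0.5x scaling; merge_aug_bboxes_3d maps each prediction back to the
    # original frame before running NMS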
img_metas = [[img_meta_0], [img_meta_1], [img_meta_2]]
boxes_3d = DepthInstance3DBoxes(
torch.tensor(
[[1.0473, 4.1687, -1.2317, 2.3021, 1.8876, 1.9696, 1.6956],
[2.5831, 4.8117, -1.2733, 0.5852, 0.8832, 0.9733, 1.6500],
[-1.0864, 1.9045, -1.2000, 0.7128, 1.5631, 2.1045, 0.1022]],
device='cuda'))
labels_3d = torch.tensor([0, 7, 6])
scores_3d = torch.tensor([0.5, 1.0, 1.0])
aug_result = dict(
boxes_3d=boxes_3d, labels_3d=labels_3d, scores_3d=scores_3d)
aug_results = [aug_result, aug_result, aug_result]
test_cfg = mmcv.ConfigDict(
use_rotate_nms=True,
nms_across_levels=False,
nms_thr=0.01,
score_thr=0.1,
min_bbox_size=0,
nms_pre=100,
max_num=50)
results = merge_aug_bboxes_3d(aug_results, img_metas, test_cfg)
expected_boxes_3d = torch.tensor(
[[-1.0864, -1.9045, -1.2000, 0.7128, 1.5631, 2.1045, -0.1022],
[1.0864, 1.9045, -1.2000, 0.7128, 1.5631, 2.1045, 3.0394],
[-2.1728, 3.8090, -2.4000, 1.4256, 3.1262, 4.2090, 0.1022],
[2.5831, -4.8117, -1.2733, 0.5852, 0.8832, 0.9733, -1.6500],
[-2.5831, 4.8117, -1.2733, 0.5852, 0.8832, 0.9733, 1.4916],
[5.1662, 9.6234, -2.5466, 1.1704, 1.7664, 1.9466, 1.6500],
[1.0473, -4.1687, -1.2317, 2.3021, 1.8876, 1.9696, -1.6956],
[-1.0473, 4.1687, -1.2317, 2.3021, 1.8876, 1.9696, 1.4460],
[2.0946, 8.3374, -2.4634, 4.6042, 3.7752, 3.9392, 1.6956]])
expected_scores_3d = torch.tensor([
1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 0.5000, 0.5000, 0.5000
])
expected_labels_3d = torch.tensor([6, 6, 6, 7, 7, 7, 0, 0, 0])
assert torch.allclose(results['boxes_3d'].tensor, expected_boxes_3d)
assert torch.allclose(results['scores_3d'], expected_scores_3d)
assert torch.all(results['labels_3d'] == expected_labels_3d)
import numpy as np
from mmdet3d.core.voxel.voxel_generator import VoxelGenerator
def test_voxel_generator():
np.random.seed(0)
voxel_size = [0.5, 0.5, 0.5]
point_cloud_range = [0, -40, -3, 70.4, 40, 1]
max_num_points = 1000
self = VoxelGenerator(voxel_size, point_cloud_range, max_num_points)
points = np.random.rand(1000, 4)
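    # generate() returns (voxels, coors, num_points_per_voxel): the padded
    # points per voxel, their integer grid coordinates, and the point count
    # in each voxel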
    voxels, coors, num_points_per_voxel = self.generate(points)
    expected_coors = np.array([[7, 81, 1], [6, 81, 0], [7, 80, 1], [6, 81, 1],
                               [7, 81, 0], [6, 80, 1], [7, 80, 0], [6, 80, 0]])
    expected_num_points_per_voxel = np.array(
        [120, 121, 127, 134, 115, 127, 125, 131])
    assert np.all(coors == expected_coors)
    assert voxels.shape == (8, 1000, 4)
    assert np.all(num_points_per_voxel == expected_num_points_per_voxel)