Unverified Commit 4e3cfbe4 authored by VVsssssk, committed by GitHub
[Fix] Fix tests dir (#1704)

* format ut dir tree

* add pytest skip
parent 4a3f90f6

# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.cnn import Scale
from torch import nn as nn

from mmdet3d.registry import TASK_UTILS


def test_fcos3d_bbox_coder():
    # test a config without priors
    bbox_coder_cfg = dict(
        type='FCOS3DBBoxCoder',
        base_depths=None,
        base_dims=None,
        code_size=7,
        norm_on_bbox=True)
    bbox_coder = TASK_UTILS.build(bbox_coder_cfg)

    # test decode
    # [2, 7, 1, 1]
    batch_bbox = torch.tensor([[[[0.3130]], [[0.7094]], [[0.8743]], [[0.0570]],
                                [[0.5579]], [[0.1593]], [[0.4553]]],
                               [[[0.7758]], [[0.2298]], [[0.3925]], [[0.6307]],
                                [[0.4377]], [[0.3339]], [[0.1966]]]])
    batch_scale = nn.ModuleList([Scale(1.0) for _ in range(3)])
    stride = 2
    training = False
    cls_score = torch.randn([2, 2, 1, 1]).sigmoid()
    decode_bbox = bbox_coder.decode(batch_bbox, batch_scale, stride, training,
                                    cls_score)
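    # A note on the expected values below (our reading of the decode math):
    # without priors the two offset channels are scaled by the stride
    # (0.3130 * 2 = 0.6261), depth and dimensions go through exp
    # (exp(0.8743) = 2.3971, exp(0.0570) = 1.0586), and yaw passes through.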
    expected_bbox = torch.tensor([[[[0.6261]], [[1.4188]], [[2.3971]],
                                   [[1.0586]], [[1.7470]], [[1.1727]],
                                   [[0.4553]]],
                                  [[[1.5516]], [[0.4596]], [[1.4806]],
                                   [[1.8790]], [[1.5492]], [[1.3965]],
                                   [[0.1966]]]])
    assert torch.allclose(decode_bbox, expected_bbox, atol=1e-3)

    # test a config with priors
    prior_bbox_coder_cfg = dict(
        type='FCOS3DBBoxCoder',
        base_depths=((28., 13.), (25., 12.)),
        base_dims=((2., 3., 1.), (1., 2., 3.)),
        code_size=7,
        norm_on_bbox=True)
    prior_bbox_coder = TASK_UTILS.build(prior_bbox_coder_cfg)

    # test decode
    batch_bbox = torch.tensor([[[[0.3130]], [[0.7094]], [[0.8743]], [[0.0570]],
                                [[0.5579]], [[0.1593]], [[0.4553]]],
                               [[[0.7758]], [[0.2298]], [[0.3925]], [[0.6307]],
                                [[0.4377]], [[0.3339]], [[0.1966]]]])
    batch_scale = nn.ModuleList([Scale(1.0) for _ in range(3)])
    stride = 2
    training = False
    cls_score = torch.tensor([[[[0.5811]], [[0.6198]]],
                              [[[0.4889]], [[0.8142]]]])
    decode_bbox = prior_bbox_coder.decode(batch_bbox, batch_scale, stride,
                                          training, cls_score)
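    # With priors (our reading of the numbers below): the highest-scoring
    # class selects the prior, so depth becomes mean + std * pred
    # (25 + 12 * 0.8743 = 35.4916) and each dimension becomes
    # base_dim * exp(pred) (2 * exp(0.5579) = 3.4940).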
    expected_bbox = torch.tensor([[[[0.6260]], [[1.4188]], [[35.4916]],
                                   [[1.0587]], [[3.4940]], [[3.5181]],
                                   [[0.4553]]],
                                  [[[1.5516]], [[0.4596]], [[29.7100]],
                                   [[1.8789]], [[3.0983]], [[4.1892]],
                                   [[0.1966]]]])
    assert torch.allclose(decode_bbox, expected_bbox, atol=1e-3)

    # test decode_yaw
    decode_bbox = decode_bbox.permute(0, 2, 3, 1).view(-1, 7)
    batch_centers2d = torch.tensor([[100., 150.], [200., 100.]])
    batch_dir_cls = torch.tensor([0., 1.])
    dir_offset = 0.7854
    cam2img = torch.tensor([[700., 0., 450., 0.], [0., 700., 200., 0.],
                            [0., 0., 1., 0.], [0., 0., 0., 1.]])
    decode_bbox = prior_bbox_coder.decode_yaw(decode_bbox, batch_centers2d,
                                              batch_dir_cls, dir_offset,
                                              cam2img)
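    # decode_yaw (as we read it) resolves the direction-bin ambiguity with
    # batch_dir_cls/dir_offset and converts the local (allocentric) yaw to a
    # global yaw using the ray through each projected 2D center, which is
    # why only the last column changes below.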
    expected_bbox = torch.tensor(
        [[0.6260, 1.4188, 35.4916, 1.0587, 3.4940, 3.5181, 3.1332],
         [1.5516, 0.4596, 29.7100, 1.8789, 3.0983, 4.1892, 6.1368]])
    assert torch.allclose(decode_bbox, expected_bbox, atol=1e-3)


# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch

from mmdet3d.registry import TASK_UTILS
from mmdet3d.structures import CameraInstance3DBoxes


def test_monoflex_bbox_coder():
    bbox_coder_cfg = dict(
        type='MonoFlexCoder',
        depth_mode='exp',
        base_depth=(26.494627, 16.05988),
        depth_range=[0.1, 100],
        combine_depth=True,
        uncertainty_range=[-10, 10],
        base_dims=((3.8840, 1.5261, 1.6286, 0.4259, 0.1367, 0.1022),
                   (0.8423, 1.7607, 0.6602, 0.2349, 0.1133, 0.1427),
                   (1.7635, 1.7372, 0.5968, 0.1766, 0.0948, 0.1242)),
        dims_mode='linear',
        multibin=True,
        num_dir_bins=4,
        bin_centers=[0, np.pi / 2, np.pi, -np.pi / 2],
        bin_margin=np.pi / 6,
        code_size=7)
    bbox_coder = TASK_UTILS.build(bbox_coder_cfg)

    gt_bboxes_3d = CameraInstance3DBoxes(torch.rand([6, 7]))
    orientation_target = bbox_coder.encode(gt_bboxes_3d)
    assert orientation_target.shape == torch.Size([6, 8])
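    # With num_dir_bins=4, the encoded orientation target has 8 channels;
    # our reading is one bin-classification entry plus one residual entry
    # per bin (see MonoFlexCoder for the exact layout).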
    regression = torch.rand([100, 50])
    base_centers2d = torch.rand([100, 2])
    labels = torch.ones([100])
    downsample_ratio = 4
    cam2imgs = torch.rand([100, 4, 4])
    preds = bbox_coder.decode(regression, base_centers2d, labels,
                              downsample_ratio, cam2imgs)
    assert preds['bboxes2d'].shape == torch.Size([100, 4])
    assert preds['dimensions'].shape == torch.Size([100, 3])
    assert preds['offsets2d'].shape == torch.Size([100, 2])
    assert preds['keypoints2d'].shape == torch.Size([100, 10, 2])
    assert preds['orientations'].shape == torch.Size([100, 16])
    assert preds['direct_depth'].shape == torch.Size([100])
    assert preds['keypoints_depth'].shape == torch.Size([100, 3])
    assert preds['combined_depth'].shape == torch.Size([100])
    assert preds['direct_depth_uncertainty'].shape == torch.Size([100])
    assert preds['keypoints_depth_uncertainty'].shape == torch.Size([100, 3])
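    # By our count of the assertions above, the 50 regression channels
    # decompose as 4 (bbox2d) + 2 (offsets2d) + 20 (keypoints2d) +
    # 16 (orientations) + 3 (dimensions) + 1 (direct depth) + 1 (direct
    # depth uncertainty) + 3 (keypoint depth uncertainties);
    # keypoints_depth and combined_depth are derived from these.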
    offsets_2d = torch.randn([100, 2])
    depths = torch.randn([100])
    locations = bbox_coder.decode_location(base_centers2d, offsets_2d, depths,
                                           cam2imgs, downsample_ratio)
    assert locations.shape == torch.Size([100, 3])

    orientations = torch.randn([100, 16])
    yaws, local_yaws = bbox_coder.decode_orientation(orientations, locations)
    assert yaws.shape == torch.Size([100])
    assert local_yaws.shape == torch.Size([100])


# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from mmcv.cnn import Scale
from torch import nn as nn

from mmdet3d.registry import TASK_UTILS
from mmdet3d.structures import (CameraInstance3DBoxes, DepthInstance3DBoxes,
                                LiDARInstance3DBoxes)


def test_partial_bin_based_box_coder():
......@@ -221,445 +217,3 @@ def test_partial_bin_based_box_coder():
    assert size_res_norm.shape == torch.Size([2, 256, 10, 3])
    assert size_res.shape == torch.Size([2, 256, 10, 3])
    assert sem_scores.shape == torch.Size([2, 256, 10])


def test_anchor_free_box_coder():
    box_coder_cfg = dict(
        type='AnchorFreeBBoxCoder', num_dir_bins=12, with_rot=True)
    box_coder = TASK_UTILS.build(box_coder_cfg)

    # test encode
    gt_bboxes = LiDARInstance3DBoxes(
        [[2.1227e+00, 5.7951e+00, -9.9900e-01, 1.6736e+00, 4.2419e+00,
          1.5473e+00, -1.5501e+00],
         [1.1791e+01, 9.0276e+00, -8.5772e-01, 1.6210e+00, 3.5367e+00,
          1.4841e+00, -1.7369e+00],
         [2.3638e+01, 9.6997e+00, -5.6713e-01, 1.7578e+00, 4.6103e+00,
          1.5999e+00, -1.4556e+00]])
    gt_labels = torch.tensor([0, 0, 0])
    (center_targets, size_targets, dir_class_targets,
     dir_res_targets) = box_coder.encode(gt_bboxes, gt_labels)
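    # Our reading of the expected targets below: the center target is the
    # gravity center (z + h / 2, e.g. -0.9990 + 1.5473 / 2 = -0.2253), the
    # size target is half the box dimensions, and the yaw is split into one
    # of 12 direction bins plus a residual.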
    expected_center_target = torch.tensor([[2.1227, 5.7951, -0.2253],
                                           [11.7908, 9.0276, -0.1156],
                                           [23.6380, 9.6997, 0.2328]])
    expected_size_targets = torch.tensor([[0.8368, 2.1210, 0.7736],
                                          [0.8105, 1.7683, 0.7421],
                                          [0.8789, 2.3052, 0.8000]])
    expected_dir_class_target = torch.tensor([9, 9, 9])
    expected_dir_res_target = torch.tensor([0.0394, -0.3172, 0.2199])
    assert torch.allclose(center_targets, expected_center_target, atol=1e-4)
    assert torch.allclose(size_targets, expected_size_targets, atol=1e-4)
    assert torch.all(dir_class_targets == expected_dir_class_target)
    assert torch.allclose(dir_res_targets, expected_dir_res_target, atol=1e-3)

    # test decode
    center = torch.tensor([[[14.5954, 6.3312, 0.7671],
                            [67.5245, 22.4422, 1.5610],
                            [47.7693, -6.7980, 1.4395]]])
    size_res = torch.tensor([[[-1.0752, 1.8760, 0.7715],
                              [-0.8016, 1.1754, 0.0102],
                              [-1.2789, 0.5948, 0.4728]]])
    dir_class = torch.tensor(
        [[[0.1512, 1.7914, -1.7658, 2.1572, -0.9215, 1.2139, 0.1749, 0.8606,
           1.1743, -0.7679, -1.6005, 0.4623],
          [-0.3957, 1.2026, -1.2677, 1.3863, -0.5754, 1.7083, 0.2601, 0.1129,
           0.7146, -0.1367, -1.2892, -0.0083],
          [-0.8862, 1.2050, -1.3881, 1.6604, -0.9087, 1.1907, -0.0280, 0.2027,
           1.0644, -0.7205, -1.0738, 0.4748]]])
    dir_res = torch.tensor(
        [[[1.1151, 0.5535, -0.2053, -0.6582, -0.1616, -0.1821, 0.4675, 0.6621,
           0.8146, -0.0448, -0.7253, -0.7171],
          [0.7888, 0.2478, -0.1962, -0.7267, 0.0573, -0.2398, 0.6984, 0.5859,
           0.7507, -0.1980, -0.6538, -0.6602],
          [0.9039, 0.6109, 0.1960, -0.5016, 0.0551, -0.4086, 0.3398, 0.2759,
           0.7247, -0.0655, -0.5052, -0.9026]]])
    bbox_out = dict(
        center=center, size=size_res, dir_class=dir_class, dir_res=dir_res)
    bbox3d = box_coder.decode(bbox_out)
    expected_bbox3d = torch.tensor(
        [[[14.5954, 6.3312, 0.7671, 0.1000, 3.7521, 1.5429, 0.9126],
          [67.5245, 22.4422, 1.5610, 0.1000, 2.3508, 0.1000, 2.3782],
          [47.7693, -6.7980, 1.4395, 0.1000, 1.1897, 0.9456, 1.0692]]])
    assert torch.allclose(bbox3d, expected_bbox3d, atol=1e-4)

    # test split_pred
    cls_preds = torch.rand(2, 1, 256)
    reg_preds = torch.rand(2, 30, 256)
    base_xyz = torch.rand(2, 256, 3)
    results = box_coder.split_pred(cls_preds, reg_preds, base_xyz)
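    # The 30 regression channels split (by our count) into 3 center offsets,
    # 3 sizes, 12 direction-bin logits and 12 normalized direction residuals.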
    obj_scores = results['obj_scores']
    center = results['center']
    center_offset = results['center_offset']
    dir_class = results['dir_class']
    dir_res_norm = results['dir_res_norm']
    dir_res = results['dir_res']
    size = results['size']
    assert obj_scores.shape == torch.Size([2, 1, 256])
    assert center.shape == torch.Size([2, 256, 3])
    assert center_offset.shape == torch.Size([2, 256, 3])
    assert dir_class.shape == torch.Size([2, 256, 12])
    assert dir_res_norm.shape == torch.Size([2, 256, 12])
    assert dir_res.shape == torch.Size([2, 256, 12])
    assert size.shape == torch.Size([2, 256, 3])


def test_centerpoint_bbox_coder():
    bbox_coder_cfg = dict(
        type='CenterPointBBoxCoder',
        post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0],
        max_num=500,
        score_threshold=0.1,
        pc_range=[-51.2, -51.2],
        out_size_factor=4,
        voxel_size=[0.2, 0.2])
    bbox_coder = TASK_UTILS.build(bbox_coder_cfg)

    batch_dim = torch.rand([2, 3, 128, 128])
    batch_hei = torch.rand([2, 1, 128, 128])
    batch_hm = torch.rand([2, 2, 128, 128])
    batch_reg = torch.rand([2, 2, 128, 128])
    batch_rotc = torch.rand([2, 1, 128, 128])
    batch_rots = torch.rand([2, 1, 128, 128])
    batch_vel = torch.rand([2, 2, 128, 128])
    temp = bbox_coder.decode(batch_hm, batch_rots, batch_rotc, batch_hei,
                             batch_dim, batch_vel, batch_reg, 5)
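    # decode keeps the top max_num=500 candidates per sample; each box has
    # 9 values: center, dimensions, yaw and (as we read it) 2D velocity.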
    for i in range(len(temp)):
        assert temp[i]['bboxes'].shape == torch.Size([500, 9])
        assert temp[i]['scores'].shape == torch.Size([500])
        assert temp[i]['labels'].shape == torch.Size([500])


def test_point_xyzwhlr_bbox_coder():
    bbox_coder_cfg = dict(
        type='PointXYZWHLRBBoxCoder',
        use_mean_size=True,
        mean_size=[[3.9, 1.6, 1.56], [0.8, 0.6, 1.73], [1.76, 0.6, 1.73]])
    boxcoder = TASK_UTILS.build(bbox_coder_cfg)

    # test encode
    gt_bboxes_3d = torch.tensor(
        [[13.3329, 2.3514, -0.7004, 1.7508, 0.4702, 1.7909, -3.0522],
         [2.2068, -2.6994, -0.3277, 3.8703, 1.6602, 1.6913, -1.9057],
         [5.5269, 2.5085, -1.0129, 1.1496, 0.8006, 1.8887, 2.1756]])
    points = torch.tensor([[13.70, 2.40, 0.12], [3.20, -3.00, 0.2],
                           [5.70, 2.20, -0.4]])
    gt_labels_3d = torch.tensor([2, 0, 1])
    bbox_target = boxcoder.encode(gt_bboxes_3d, points, gt_labels_3d)
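    # Our reading of the 8 targets per box below: center offsets normalized
    # by the BEV diagonal of the class mean size (z by the mean height,
    # e.g. -0.8204 / 1.73 = -0.4742), log-ratios of the dims to the mean
    # size, and the yaw encoded as (cos, sin), hence 7 box values -> 8.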
    expected_bbox_target = torch.tensor(
        [[-0.1974, -0.0261, -0.4742, -0.0052, -0.2438, 0.0346, -0.9960,
          -0.0893],
         [-0.2356, 0.0713, -0.3383, -0.0076, 0.0369, 0.0808, -0.3287,
          -0.9444],
         [-0.1731, 0.3085, -0.3543, 0.3626, 0.2884, 0.0878, -0.5686,
          0.8226]])
    assert torch.allclose(expected_bbox_target, bbox_target, atol=1e-4)

    # test decode
    bbox3d_out = boxcoder.decode(bbox_target, points, gt_labels_3d)
    assert torch.allclose(bbox3d_out, gt_bboxes_3d, atol=1e-4)


def test_pgd_bbox_coder():
    # test a config without priors
    bbox_coder_cfg = dict(
        type='PGDBBoxCoder',
        base_depths=None,
        base_dims=None,
        code_size=7,
        norm_on_bbox=True)
    bbox_coder = TASK_UTILS.build(bbox_coder_cfg)

    # test decode_2d
    # [2, 27, 1, 1]
    batch_bbox = torch.tensor(
        [[[[0.0103]], [[0.7394]], [[0.3296]], [[0.4708]], [[0.1439]],
          [[0.0778]], [[0.9399]], [[0.8366]], [[0.1264]], [[0.3030]],
          [[0.1898]], [[0.0714]], [[0.4144]], [[0.4341]], [[0.6442]],
          [[0.2951]], [[0.2890]], [[0.4486]], [[0.2848]], [[0.1071]],
          [[0.9530]], [[0.9460]], [[0.3822]], [[0.9320]], [[0.2611]],
          [[0.5580]], [[0.0397]]],
         [[[0.8612]], [[0.1680]], [[0.5167]], [[0.8502]], [[0.0377]],
          [[0.3615]], [[0.9550]], [[0.5219]], [[0.1402]], [[0.6843]],
          [[0.2121]], [[0.9468]], [[0.6238]], [[0.7918]], [[0.1646]],
          [[0.0500]], [[0.6290]], [[0.3956]], [[0.2901]], [[0.4612]],
          [[0.7333]], [[0.1194]], [[0.6999]], [[0.3980]], [[0.3262]],
          [[0.7185]], [[0.4474]]]])
    batch_scale = nn.ModuleList([Scale(1.0) for _ in range(5)])
    stride = 2
    training = False
    cls_score = torch.randn([2, 2, 1, 1]).sigmoid()
    decode_bbox = bbox_coder.decode(batch_bbox, batch_scale, stride, training,
                                    cls_score)
    max_regress_range = 16
    pred_keypoints = True
    pred_bbox2d = True
    decode_bbox_w2d = bbox_coder.decode_2d(decode_bbox, batch_scale, stride,
                                           max_regress_range, training,
                                           pred_keypoints, pred_bbox2d)
    expected_decode_bbox_w2d = torch.tensor(
        [[[[0.0206]], [[1.4788]], [[1.3904]], [[1.6013]], [[1.1548]],
          [[1.0809]], [[0.9399]], [[10.9441]], [[2.0117]], [[4.7049]],
          [[3.0009]], [[1.1405]], [[6.2752]], [[6.5399]], [[9.0840]],
          [[4.5892]], [[4.4994]], [[6.7320]], [[4.4375]], [[1.7071]],
          [[11.8582]], [[11.8075]], [[5.8339]], [[1.8640]], [[0.5222]],
          [[1.1160]], [[0.0794]]],
         [[[1.7224]], [[0.3360]], [[1.6765]], [[2.3401]], [[1.0384]],
          [[1.4355]], [[0.9550]], [[7.6666]], [[2.2286]], [[9.5089]],
          [[3.3436]], [[11.8133]], [[8.8603]], [[10.5508]], [[2.6101]],
          [[0.7993]], [[8.9178]], [[6.0188]], [[4.5156]], [[6.8970]],
          [[10.0013]], [[1.9014]], [[9.6689]], [[0.7960]], [[0.6524]],
          [[1.4370]], [[0.8948]]]])
    assert torch.allclose(expected_decode_bbox_w2d, decode_bbox_w2d, atol=1e-3)

    # test decode_prob_depth
    # [10, 8]
    depth_cls_preds = torch.tensor([
        [-0.4383, 0.7207, -0.4092, 0.4649, 0.8526, 0.6186, -1.4312, -0.7150],
        [0.0621, 0.2369, 0.5170, 0.8484, -0.1099, 0.1829, -0.0072, 1.0618],
        [-1.6114, -0.1057, 0.5721, -0.5986, -2.0471, 0.8140, -0.8385, -0.4822],
        [0.0742, -0.3261, 0.4607, 1.8155, -0.3571, -0.0234, 0.3787, 2.3251],
        [1.0492, -0.6881, -0.0136, -1.8291, 0.8460, -1.0171, 2.5691, -0.8114],
        [0.0968, -0.5601, 1.0458, 0.2560, 1.3018, 0.1635, 0.0680, -1.0263],
        [-0.0765, 0.1498, -2.7321, 1.0047, -0.2505, 0.0871, -0.4820, -0.3003],
        [-0.4123, 0.2298, -0.1330, -0.6008, 0.6526, 0.7118, 0.9728, -0.7793],
        [1.6940, 0.3355, 1.4661, 0.5477, 0.8667, 0.0527, -0.9975, -0.0689],
        [0.4724, -0.3632, -0.0654, 0.4034, -0.3494, -0.7548, 0.7297, 1.2754]
    ])
    depth_range = (0, 70)
    depth_unit = 10
    num_depth_cls = 8
    uniform_prob_depth_preds = bbox_coder.decode_prob_depth(
        depth_cls_preds, depth_range, depth_unit, 'uniform', num_depth_cls)
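    # decode_prob_depth (as we read it) softmaxes the 8 logits and returns
    # the expectation over the bin centers; 'uniform', 'linear', 'log' and
    # 'loguniform' only change how the centers are spaced over depth_range
    # (for 'uniform', center_i = i * depth_unit, so row 1 gives 32.0441).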
    expected_preds = torch.tensor([
        32.0441, 38.4689, 36.1831, 48.2096, 46.1560, 32.7973, 33.2155,
        39.9822, 21.9905, 43.0161
    ])
    assert torch.allclose(uniform_prob_depth_preds, expected_preds, atol=1e-3)

    linear_prob_depth_preds = bbox_coder.decode_prob_depth(
        depth_cls_preds, depth_range, depth_unit, 'linear', num_depth_cls)
    expected_preds = torch.tensor([
        21.1431, 30.2421, 25.8964, 41.6116, 38.6234, 21.4582, 23.2993,
        30.1111, 13.9273, 36.8419
    ])
    assert torch.allclose(linear_prob_depth_preds, expected_preds, atol=1e-3)

    log_prob_depth_preds = bbox_coder.decode_prob_depth(
        depth_cls_preds, depth_range, depth_unit, 'log', num_depth_cls)
    expected_preds = torch.tensor([
        12.6458, 24.2487, 17.4015, 36.9375, 27.5982, 12.5510, 15.6635,
        19.8408, 9.1605, 31.3765
    ])
    assert torch.allclose(log_prob_depth_preds, expected_preds, atol=1e-3)

    loguniform_prob_depth_preds = bbox_coder.decode_prob_depth(
        depth_cls_preds, depth_range, depth_unit, 'loguniform', num_depth_cls)
    expected_preds = torch.tensor([
        6.9925, 10.3273, 8.9895, 18.6524, 16.4667, 7.3196, 7.5078, 11.3207,
        3.7987, 13.6095
    ])
    assert torch.allclose(
        loguniform_prob_depth_preds, expected_preds, atol=1e-3)


def test_smoke_bbox_coder():
    bbox_coder_cfg = dict(
        type='SMOKECoder',
        base_depth=(28.01, 16.32),
        base_dims=((3.88, 1.63, 1.53), (1.78, 1.70, 0.58),
                   (0.88, 1.73, 0.67)),
        code_size=7)
    bbox_coder = TASK_UTILS.build(bbox_coder_cfg)

    regression = torch.rand([200, 8])
    points = torch.rand([200, 2])
    labels = torch.ones([2, 100])
    cam2imgs = torch.rand([2, 4, 4])
    trans_mats = torch.rand([2, 3, 3])
    img_metas = [dict(box_type_3d=CameraInstance3DBoxes) for i in range(2)]
    locations, dimensions, orientations = bbox_coder.decode(
        regression, points, labels, cam2imgs, trans_mats)
    assert locations.shape == torch.Size([200, 3])
    assert dimensions.shape == torch.Size([200, 3])
    assert orientations.shape == torch.Size([200, 1])
    bboxes = bbox_coder.encode(locations, dimensions, orientations, img_metas)
    assert bboxes.tensor.shape == torch.Size([200, 7])

    # specifically designed to test the orientation decode function's
    # special cases
    ori_vector = torch.tensor([[-0.9, -0.01], [-0.9, 0.01]])
    locations = torch.tensor([[15., 2., 1.], [15., 2., -1.]])
    orientations = bbox_coder._decode_orientation(ori_vector, locations)
    assert orientations.shape == torch.Size([2, 1])


# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch

from mmdet3d.models.builder import build_voxel_encoder


def test_pillar_feature_net():
    if not torch.cuda.is_available():
        pytest.skip('test requires GPU and torch+cuda')
    pillar_feature_net_cfg = dict(
        type='PillarFeatureNet',
        in_channels=5,
        feat_channels=[64],
        with_distance=False,
        voxel_size=(0.2, 0.2, 8),
        point_cloud_range=(-51.2, -51.2, -5.0, 51.2, 51.2, 3.0),
        norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01))
    pillar_feature_net = build_voxel_encoder(pillar_feature_net_cfg)

    features = torch.rand([97297, 20, 5])
    num_voxels = torch.randint(1, 100, [97297])
    coors = torch.randint(0, 100, [97297, 4])
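    # Inputs are (num_pillars, max_points_per_pillar, point_features), the
    # per-pillar point counts and the (batch_idx, z, y, x) pillar
    # coordinates (our reading); the net reduces each pillar to one 64-d
    # feature vector.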
    features = pillar_feature_net(features, num_voxels, coors)
    assert features.shape == torch.Size([97297, 64])


def test_hard_simple_VFE():
    if not torch.cuda.is_available():
        pytest.skip('test requires GPU and torch+cuda')
    hard_simple_VFE_cfg = dict(type='HardSimpleVFE', num_features=5)
    hard_simple_VFE = build_voxel_encoder(hard_simple_VFE_cfg)
    features = torch.rand([240000, 10, 5])
......


# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch

from mmdet3d.structures.points import BasePoints


def test_base_points():
    # test empty initialization
    empty_boxes = []
    points = BasePoints(empty_boxes)
    assert points.tensor.shape[0] == 0
    assert points.tensor.shape[1] == 3

    # Test init with origin
    points_np = np.array(
        [[-5.24223238e+00, 4.00209696e+01, 2.97570381e-01],
         [-2.66751588e+01, 5.59499564e+00, -9.14345860e-01],
         [-5.80979675e+00, 3.54092357e+01, 2.00889888e-01],
         [-3.13086877e+01, 1.09007628e+00, -1.94612112e-01]],
        dtype=np.float32)
    base_points = BasePoints(points_np, points_dim=3)
    assert base_points.tensor.shape[0] == 4

    # Test init with color and height
    points_np = np.array(
        [[-5.24223238e+00, 4.00209696e+01, 2.97570381e-01, 0.6666, 0.1956,
          0.4974, 0.9409],
         [-2.66751588e+01, 5.59499564e+00, -9.14345860e-01, 0.1502, 0.3707,
          0.1086, 0.6297],
         [-5.80979675e+00, 3.54092357e+01, 2.00889888e-01, 0.6565, 0.6248,
          0.6954, 0.2538],
         [-3.13086877e+01, 1.09007628e+00, -1.94612112e-01, 0.2803, 0.0258,
          0.4896, 0.3269]],
        dtype=np.float32)
    base_points = BasePoints(
        points_np,
        points_dim=7,
        attribute_dims=dict(color=[3, 4, 5], height=6))
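    # attribute_dims maps attribute names to tensor columns; it is what
    # backs the .color / .height accessors checked below.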
    expected_tensor = torch.tensor(
        [[-5.24223238e+00, 4.00209696e+01, 2.97570381e-01, 0.6666, 0.1956,
          0.4974, 0.9409],
         [-2.66751588e+01, 5.59499564e+00, -9.14345860e-01, 0.1502, 0.3707,
          0.1086, 0.6297],
         [-5.80979675e+00, 3.54092357e+01, 2.00889888e-01, 0.6565, 0.6248,
          0.6954, 0.2538],
         [-3.13086877e+01, 1.09007628e+00, -1.94612112e-01, 0.2803, 0.0258,
          0.4896, 0.3269]])
    assert torch.allclose(expected_tensor, base_points.tensor)
    assert torch.allclose(expected_tensor[:, :2], base_points.bev)
    assert torch.allclose(expected_tensor[:, :3], base_points.coord)
    assert torch.allclose(expected_tensor[:, 3:6], base_points.color)
    assert torch.allclose(expected_tensor[:, 6], base_points.height)

    # test points clone
    new_base_points = base_points.clone()
    assert torch.allclose(new_base_points.tensor, base_points.tensor)

    # test points shuffle
    new_base_points.shuffle()
    assert new_base_points.tensor.shape == torch.Size([4, 7])

    # test points rotation
    rot_mat = torch.tensor([[0.93629336, -0.27509585, 0.21835066],
                            [0.28962948, 0.95642509, -0.03695701],
                            [-0.19866933, 0.0978434, 0.97517033]])
    base_points.rotate(rot_mat)
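    # rotate multiplies the xyz columns by the given 3x3 matrix (row-vector
    # convention, judging by the expected values below); the color and
    # height columns are untouched.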
    expected_tensor = torch.tensor(
        [[6.6239e+00, 3.9748e+01, -2.3335e+00, 6.6660e-01, 1.9560e-01,
          4.9740e-01, 9.4090e-01],
         [-2.3174e+01, 1.2600e+01, -6.9230e+00, 1.5020e-01, 3.7070e-01,
          1.0860e-01, 6.2970e-01],
         [4.7760e+00, 3.5484e+01, -2.3813e+00, 6.5650e-01, 6.2480e-01,
          6.9540e-01, 2.5380e-01],
         [-2.8960e+01, 9.6364e+00, -7.0663e+00, 2.8030e-01, 2.5800e-02,
          4.8960e-01, 3.2690e-01]])
    assert torch.allclose(expected_tensor, base_points.tensor, 1e-3)

    new_base_points = base_points.clone()
    new_base_points.rotate(0.1, axis=2)
    expected_tensor = torch.tensor(
        [[2.6226e+00, 4.0211e+01, -2.3335e+00, 6.6660e-01, 1.9560e-01,
          4.9740e-01, 9.4090e-01],
         [-2.4316e+01, 1.0224e+01, -6.9230e+00, 1.5020e-01, 3.7070e-01,
          1.0860e-01, 6.2970e-01],
         [1.2096e+00, 3.5784e+01, -2.3813e+00, 6.5650e-01, 6.2480e-01,
          6.9540e-01, 2.5380e-01],
         [-2.9777e+01, 6.6971e+00, -7.0663e+00, 2.8030e-01, 2.5800e-02,
          4.8960e-01, 3.2690e-01]])
    assert torch.allclose(expected_tensor, new_base_points.tensor, 1e-3)

    # test points translation
    translation_vector = torch.tensor([0.93629336, -0.27509585, 0.21835066])
    base_points.translate(translation_vector)
    expected_tensor = torch.tensor(
        [[7.5602e+00, 3.9473e+01, -2.1152e+00, 6.6660e-01, 1.9560e-01,
          4.9740e-01, 9.4090e-01],
         [-2.2237e+01, 1.2325e+01, -6.7046e+00, 1.5020e-01, 3.7070e-01,
          1.0860e-01, 6.2970e-01],
         [5.7123e+00, 3.5209e+01, -2.1629e+00, 6.5650e-01, 6.2480e-01,
          6.9540e-01, 2.5380e-01],
         [-2.8023e+01, 9.3613e+00, -6.8480e+00, 2.8030e-01, 2.5800e-02,
          4.8960e-01, 3.2690e-01]])
    assert torch.allclose(expected_tensor, base_points.tensor, 1e-4)

    # test points filter
    point_range = [-10, -40, -10, 10, 40, 10]
    in_range_flags = base_points.in_range_3d(point_range)
    expected_flags = torch.tensor([True, False, True, False])
    assert torch.all(in_range_flags == expected_flags)

    # test points scale
    base_points.scale(1.2)
    expected_tensor = torch.tensor(
        [[9.0722e+00, 4.7368e+01, -2.5382e+00, 6.6660e-01, 1.9560e-01,
          4.9740e-01, 9.4090e-01],
         [-2.6685e+01, 1.4790e+01, -8.0455e+00, 1.5020e-01, 3.7070e-01,
          1.0860e-01, 6.2970e-01],
         [6.8547e+00, 4.2251e+01, -2.5955e+00, 6.5650e-01, 6.2480e-01,
          6.9540e-01, 2.5380e-01],
         [-3.3628e+01, 1.1234e+01, -8.2176e+00, 2.8030e-01, 2.5800e-02,
          4.8960e-01, 3.2690e-01]])
    assert torch.allclose(expected_tensor, base_points.tensor, 1e-3)

    # test get_item
    expected_tensor = torch.tensor(
        [[-26.6848, 14.7898, -8.0455, 0.1502, 0.3707, 0.1086, 0.6297]])
    assert torch.allclose(expected_tensor, base_points[1].tensor, 1e-4)
    expected_tensor = torch.tensor(
        [[-26.6848, 14.7898, -8.0455, 0.1502, 0.3707, 0.1086, 0.6297],
         [6.8547, 42.2509, -2.5955, 0.6565, 0.6248, 0.6954, 0.2538]])
    assert torch.allclose(expected_tensor, base_points[1:3].tensor, 1e-4)
    mask = torch.tensor([True, False, True, False])
    expected_tensor = torch.tensor(
        [[9.0722, 47.3678, -2.5382, 0.6666, 0.1956, 0.4974, 0.9409],
         [6.8547, 42.2509, -2.5955, 0.6565, 0.6248, 0.6954, 0.2538]])
    assert torch.allclose(expected_tensor, base_points[mask].tensor, 1e-4)
    expected_tensor = torch.tensor([[0.6666], [0.1502], [0.6565], [0.2803]])
    assert torch.allclose(expected_tensor, base_points[:, 3].tensor, 1e-4)

    # test length
    assert len(base_points) == 4

    # test repr
    expected_repr = 'BasePoints(\n '\
        'tensor([[ 9.0722e+00, 4.7368e+01, -2.5382e+00, '\
        '6.6660e-01, 1.9560e-01,\n 4.9740e-01, '\
        '9.4090e-01],\n '\
        '[-2.6685e+01, 1.4790e+01, -8.0455e+00, 1.5020e-01, '\
        '3.7070e-01,\n '\
        '1.0860e-01, 6.2970e-01],\n '\
        '[ 6.8547e+00, 4.2251e+01, -2.5955e+00, 6.5650e-01, '\
        '6.2480e-01,\n '\
        '6.9540e-01, 2.5380e-01],\n '\
        '[-3.3628e+01, 1.1234e+01, -8.2176e+00, 2.8030e-01, '\
        '2.5800e-02,\n '\
        '4.8960e-01, 3.2690e-01]]))'
    assert expected_repr == str(base_points)
    # test concatenate
    base_points_clone = base_points.clone()
    cat_points = BasePoints.cat([base_points, base_points_clone])
    assert torch.allclose(cat_points.tensor[:len(base_points)],
                          base_points.tensor)

    # test iteration
    for i, point in enumerate(base_points):
        assert torch.allclose(point, base_points.tensor[i])

    # test new_point
    new_points = base_points.new_point([[1, 2, 3, 4, 5, 6, 7]])
    assert torch.allclose(
        new_points.tensor,
        torch.tensor([[1, 2, 3, 4, 5, 6, 7]], dtype=base_points.tensor.dtype))

    # test BasePoints indexing
    base_points = BasePoints(
        points_np,
        points_dim=7,
        attribute_dims=dict(height=3, color=[4, 5, 6]))
    assert torch.all(
        base_points[:, 3:].tensor == torch.tensor(points_np[:, 3:]))

    # test set and get functions for BasePoints color and height
    base_points = BasePoints(points_np[:, :3])
    assert base_points.attribute_dims is None
    base_points.height = points_np[:, 3]
    assert base_points.attribute_dims == dict(height=3)
    base_points.color = points_np[:, 4:]
    assert base_points.attribute_dims == dict(height=3, color=[4, 5, 6])
    assert torch.allclose(base_points.height,
                          torch.tensor([0.6666, 0.1502, 0.6565, 0.2803]))
    assert torch.allclose(
        base_points.color,
        torch.tensor([[0.1956, 0.4974, 0.9409], [0.3707, 0.1086, 0.6297],
                      [0.6248, 0.6954, 0.2538], [0.0258, 0.4896, 0.3269]]))

    # values to be set should have the correct shape (e.g. number of points)
    with pytest.raises(ValueError):
        base_points.coord = np.random.rand(5, 3)
    with pytest.raises(ValueError):
        base_points.height = np.random.rand(3)
    with pytest.raises(ValueError):
        base_points.color = np.random.rand(4, 2)
    base_points.coord = points_np[:, [1, 2, 3]]
    base_points.height = points_np[:, 0]
    base_points.color = points_np[:, [4, 5, 6]]
    assert np.allclose(base_points.coord, points_np[:, 1:4])
    assert np.allclose(base_points.height, points_np[:, 0])
    assert np.allclose(base_points.color, points_np[:, 4:])


# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch

from mmdet3d.structures.points import (BasePoints, CameraPoints, DepthPoints,
                                       LiDARPoints)


def test_cam_points():
......@@ -821,280 +557,3 @@ def test_lidar_points():
          3.2690e-01]])
    assert torch.allclose(expected_tensor, lidar_points.tensor, 1e-4)


def test_depth_points():
    # test empty initialization
    empty_boxes = []
    points = DepthPoints(empty_boxes)
    assert points.tensor.shape[0] == 0
    assert points.tensor.shape[1] == 3

    # Test init with origin
    points_np = np.array(
        [[-5.24223238e+00, 4.00209696e+01, 2.97570381e-01],
         [-2.66751588e+01, 5.59499564e+00, -9.14345860e-01],
         [-5.80979675e+00, 3.54092357e+01, 2.00889888e-01],
         [-3.13086877e+01, 1.09007628e+00, -1.94612112e-01]],
        dtype=np.float32)
    depth_points = DepthPoints(points_np, points_dim=3)
    assert depth_points.tensor.shape[0] == 4

    # Test init with color and height
    points_np = np.array(
        [[-5.24223238e+00, 4.00209696e+01, 2.97570381e-01, 0.6666, 0.1956,
          0.4974, 0.9409],
         [-2.66751588e+01, 5.59499564e+00, -9.14345860e-01, 0.1502, 0.3707,
          0.1086, 0.6297],
         [-5.80979675e+00, 3.54092357e+01, 2.00889888e-01, 0.6565, 0.6248,
          0.6954, 0.2538],
         [-3.13086877e+01, 1.09007628e+00, -1.94612112e-01, 0.2803, 0.0258,
          0.4896, 0.3269]],
        dtype=np.float32)
    depth_points = DepthPoints(
        points_np,
        points_dim=7,
        attribute_dims=dict(color=[3, 4, 5], height=6))
    expected_tensor = torch.tensor(
        [[-5.24223238e+00, 4.00209696e+01, 2.97570381e-01, 0.6666, 0.1956,
          0.4974, 0.9409],
         [-2.66751588e+01, 5.59499564e+00, -9.14345860e-01, 0.1502, 0.3707,
          0.1086, 0.6297],
         [-5.80979675e+00, 3.54092357e+01, 2.00889888e-01, 0.6565, 0.6248,
          0.6954, 0.2538],
         [-3.13086877e+01, 1.09007628e+00, -1.94612112e-01, 0.2803, 0.0258,
          0.4896, 0.3269]])
    assert torch.allclose(expected_tensor, depth_points.tensor)
    assert torch.allclose(expected_tensor[:, :2], depth_points.bev)
    assert torch.allclose(expected_tensor[:, :3], depth_points.coord)
    assert torch.allclose(expected_tensor[:, 3:6], depth_points.color)
    assert torch.allclose(expected_tensor[:, 6], depth_points.height)

    # test points clone
    new_depth_points = depth_points.clone()
    assert torch.allclose(new_depth_points.tensor, depth_points.tensor)

    # test points shuffle
    new_depth_points.shuffle()
    assert new_depth_points.tensor.shape == torch.Size([4, 7])

    # test points rotation
    rot_mat = torch.tensor([[0.93629336, -0.27509585, 0.21835066],
                            [0.28962948, 0.95642509, -0.03695701],
                            [-0.19866933, 0.0978434, 0.97517033]])
    depth_points.rotate(rot_mat)
    expected_tensor = torch.tensor(
        [[6.6239e+00, 3.9748e+01, -2.3335e+00, 6.6660e-01, 1.9560e-01,
          4.9740e-01, 9.4090e-01],
         [-2.3174e+01, 1.2600e+01, -6.9230e+00, 1.5020e-01, 3.7070e-01,
          1.0860e-01, 6.2970e-01],
         [4.7760e+00, 3.5484e+01, -2.3813e+00, 6.5650e-01, 6.2480e-01,
          6.9540e-01, 2.5380e-01],
         [-2.8960e+01, 9.6364e+00, -7.0663e+00, 2.8030e-01, 2.5800e-02,
          4.8960e-01, 3.2690e-01]])
    assert torch.allclose(expected_tensor, depth_points.tensor, 1e-3)

    new_depth_points = depth_points.clone()
    new_depth_points.rotate(0.1, axis=2)
    expected_tensor = torch.tensor(
        [[2.6226e+00, 4.0211e+01, -2.3335e+00, 6.6660e-01, 1.9560e-01,
          4.9740e-01, 9.4090e-01],
         [-2.4316e+01, 1.0224e+01, -6.9230e+00, 1.5020e-01, 3.7070e-01,
          1.0860e-01, 6.2970e-01],
         [1.2096e+00, 3.5784e+01, -2.3813e+00, 6.5650e-01, 6.2480e-01,
          6.9540e-01, 2.5380e-01],
         [-2.9777e+01, 6.6971e+00, -7.0663e+00, 2.8030e-01, 2.5800e-02,
          4.8960e-01, 3.2690e-01]])
    assert torch.allclose(expected_tensor, new_depth_points.tensor, 1e-3)

    # test points translation
    translation_vector = torch.tensor([0.93629336, -0.27509585, 0.21835066])
    depth_points.translate(translation_vector)
    expected_tensor = torch.tensor(
        [[7.5602e+00, 3.9473e+01, -2.1152e+00, 6.6660e-01, 1.9560e-01,
          4.9740e-01, 9.4090e-01],
         [-2.2237e+01, 1.2325e+01, -6.7046e+00, 1.5020e-01, 3.7070e-01,
          1.0860e-01, 6.2970e-01],
         [5.7123e+00, 3.5209e+01, -2.1629e+00, 6.5650e-01, 6.2480e-01,
          6.9540e-01, 2.5380e-01],
         [-2.8023e+01, 9.3613e+00, -6.8480e+00, 2.8030e-01, 2.5800e-02,
          4.8960e-01, 3.2690e-01]])
    assert torch.allclose(expected_tensor, depth_points.tensor, 1e-4)

    # test points filter
    point_range = [-10, -40, -10, 10, 40, 10]
    in_range_flags = depth_points.in_range_3d(point_range)
    expected_flags = torch.tensor([True, False, True, False])
    assert torch.all(in_range_flags == expected_flags)

    # test points scale
    depth_points.scale(1.2)
    expected_tensor = torch.tensor(
        [[9.0722e+00, 4.7368e+01, -2.5382e+00, 6.6660e-01, 1.9560e-01,
          4.9740e-01, 9.4090e-01],
         [-2.6685e+01, 1.4790e+01, -8.0455e+00, 1.5020e-01, 3.7070e-01,
          1.0860e-01, 6.2970e-01],
         [6.8547e+00, 4.2251e+01, -2.5955e+00, 6.5650e-01, 6.2480e-01,
          6.9540e-01, 2.5380e-01],
         [-3.3628e+01, 1.1234e+01, -8.2176e+00, 2.8030e-01, 2.5800e-02,
          4.8960e-01, 3.2690e-01]])
    assert torch.allclose(expected_tensor, depth_points.tensor, 1e-3)

    # test get_item
    expected_tensor = torch.tensor(
        [[-26.6848, 14.7898, -8.0455, 0.1502, 0.3707, 0.1086, 0.6297]])
    assert torch.allclose(expected_tensor, depth_points[1].tensor, 1e-4)
    expected_tensor = torch.tensor(
        [[-26.6848, 14.7898, -8.0455, 0.1502, 0.3707, 0.1086, 0.6297],
         [6.8547, 42.2509, -2.5955, 0.6565, 0.6248, 0.6954, 0.2538]])
    assert torch.allclose(expected_tensor, depth_points[1:3].tensor, 1e-4)
    mask = torch.tensor([True, False, True, False])
    expected_tensor = torch.tensor(
        [[9.0722, 47.3678, -2.5382, 0.6666, 0.1956, 0.4974, 0.9409],
         [6.8547, 42.2509, -2.5955, 0.6565, 0.6248, 0.6954, 0.2538]])
    assert torch.allclose(expected_tensor, depth_points[mask].tensor, 1e-4)
    expected_tensor = torch.tensor([[0.6666], [0.1502], [0.6565], [0.2803]])
    assert torch.allclose(expected_tensor, depth_points[:, 3].tensor, 1e-4)

    # test length
    assert len(depth_points) == 4

    # test repr
    expected_repr = 'DepthPoints(\n '\
        'tensor([[ 9.0722e+00, 4.7368e+01, -2.5382e+00, '\
        '6.6660e-01, 1.9560e-01,\n 4.9740e-01, '\
        '9.4090e-01],\n '\
        '[-2.6685e+01, 1.4790e+01, -8.0455e+00, 1.5020e-01, '\
        '3.7070e-01,\n '\
        '1.0860e-01, 6.2970e-01],\n '\
        '[ 6.8547e+00, 4.2251e+01, -2.5955e+00, 6.5650e-01, '\
        '6.2480e-01,\n '\
        '6.9540e-01, 2.5380e-01],\n '\
        '[-3.3628e+01, 1.1234e+01, -8.2176e+00, 2.8030e-01, '\
        '2.5800e-02,\n '\
        '4.8960e-01, 3.2690e-01]]))'
    assert expected_repr == str(depth_points)

    # test concatenate
    depth_points_clone = depth_points.clone()
    cat_points = DepthPoints.cat([depth_points, depth_points_clone])
    assert torch.allclose(cat_points.tensor[:len(depth_points)],
                          depth_points.tensor)

    # test iteration
    for i, point in enumerate(depth_points):
        assert torch.allclose(point, depth_points.tensor[i])

    # test new_point
    new_points = depth_points.new_point([[1, 2, 3, 4, 5, 6, 7]])
    assert torch.allclose(
        new_points.tensor,
        torch.tensor([[1, 2, 3, 4, 5, 6, 7]],
                     dtype=depth_points.tensor.dtype))

    # test in_range_bev
    point_bev_range = [-30, -40, 30, 40]
    in_range_flags = depth_points.in_range_bev(point_bev_range)
    expected_flags = torch.tensor([False, True, False, False])
    assert torch.all(in_range_flags == expected_flags)

    # test flip
    depth_points.flip(bev_direction='horizontal')
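    # For depth-coordinate points, a horizontal BEV flip negates x and a
    # vertical flip negates y (see the expected tensors below); attribute
    # columns are unchanged.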
    expected_tensor = torch.tensor(
        [[-9.0722e+00, 4.7368e+01, -2.5382e+00, 6.6660e-01, 1.9560e-01,
          4.9740e-01, 9.4090e-01],
         [2.6685e+01, 1.4790e+01, -8.0455e+00, 1.5020e-01, 3.7070e-01,
          1.0860e-01, 6.2970e-01],
         [-6.8547e+00, 4.2251e+01, -2.5955e+00, 6.5650e-01, 6.2480e-01,
          6.9540e-01, 2.5380e-01],
         [3.3628e+01, 1.1234e+01, -8.2176e+00, 2.8030e-01, 2.5800e-02,
          4.8960e-01, 3.2690e-01]])
    assert torch.allclose(expected_tensor, depth_points.tensor, 1e-4)

    depth_points.flip(bev_direction='vertical')
    expected_tensor = torch.tensor(
        [[-9.0722e+00, -4.7368e+01, -2.5382e+00, 6.6660e-01, 1.9560e-01,
          4.9740e-01, 9.4090e-01],
         [2.6685e+01, -1.4790e+01, -8.0455e+00, 1.5020e-01, 3.7070e-01,
          1.0860e-01, 6.2970e-01],
         [-6.8547e+00, -4.2251e+01, -2.5955e+00, 6.5650e-01, 6.2480e-01,
          6.9540e-01, 2.5380e-01],
         [3.3628e+01, -1.1234e+01, -8.2176e+00, 2.8030e-01, 2.5800e-02,
          4.8960e-01, 3.2690e-01]])
    assert torch.allclose(expected_tensor, depth_points.tensor, 1e-4)