OpenDAS / mmdetection3d · Commit 32a4328b (unverified)

Bump version to V1.0.0rc0

Authored Feb 24, 2022 by Wenwei Zhang; committed via GitHub on Feb 24, 2022.
Parents: 86cc487c, a8817998. Changes: 414.

Showing 20 changed files with 1923 additions and 372 deletions (+1923, -372).
Changed files:

tests/test_models/test_common_modules/test_pointnet_modules.py  (+14, -0)
tests/test_models/test_common_modules/test_pointnet_ops.py      (+3, -10)
tests/test_models/test_common_modules/test_roiaware_pool3d.py   (+43, -19)
tests/test_models/test_detectors.py                             (+102, -3)
tests/test_models/test_forward.py                               (+3, -2)
tests/test_models/test_heads/test_dgcnn_decode_head.py          (+68, -0)
tests/test_models/test_heads/test_heads.py                      (+354, -11)
tests/test_models/test_heads/test_parta2_bbox_head.py           (+15, -16)
tests/test_models/test_heads/test_roi_extractors.py             (+29, -3)
tests/test_models/test_heads/test_semantic_heads.py             (+2, -2)
tests/test_models/test_necks/test_necks.py                      (+75, -0)
tests/test_models/test_segmentors.py                            (+47, -1)
tests/test_runtime/test_apis.py                                 (+4, -3)
tests/test_runtime/test_config.py                               (+27, -0)
tests/test_utils/test_anchors.py                                (+22, -22)
tests/test_utils/test_bbox_coders.py                            (+312, -1)
tests/test_utils/test_box3d.py                                  (+699, -229)
tests/test_utils/test_box_np_ops.py                             (+29, -15)
tests/test_utils/test_coord_3d_mode.py                          (+71, -35)
tests/test_utils/test_points.py                                 (+4, -0)
tests/test_models/test_common_modules/test_pointnet_modules.py  (+14, -0)

@@ -108,6 +108,20 @@ def test_pointnet_sa_module_msg():
     assert new_features.shape == torch.Size([1, 48, 20])
     assert inds.shape == torch.Size([1, 20])
 
+    # test num_points = None
+    self = PointSAModuleMSG(
+        num_point=None,
+        radii=[0.2, 0.4],
+        sample_nums=[4, 8],
+        mlp_channels=[[12, 16], [12, 32]],
+        norm_cfg=dict(type='BN2d'),
+        use_xyz=False,
+        pool_mod='max').cuda()
+
+    # test forward
+    new_xyz, new_features, inds = self(xyz, features)
+    assert new_features.shape == torch.Size([1, 48, 1])
+
     # length of 'fps_mod' should be same as 'fps_sample_range_list'
     with pytest.raises(AssertionError):
         PointSAModuleMSG(
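As a side note on the API this hunk exercises, here is a minimal sketch of the new num_point=None path, assuming an mmdet3d build with CUDA ops, a GPU, and that PointSAModuleMSG is exported from mmdet3d.ops (shapes follow the test above):

import torch
from mmdet3d.ops import PointSAModuleMSG

xyz = torch.rand(1, 100, 3).cuda()        # (B, N, 3) point coordinates
features = torch.rand(1, 12, 100).cuda()  # (B, C, N) per-point features

sa_module = PointSAModuleMSG(
    num_point=None,            # skip FPS sampling: pool all points into one group
    radii=[0.2, 0.4],
    sample_nums=[4, 8],
    mlp_channels=[[12, 16], [12, 32]],
    norm_cfg=dict(type='BN2d'),
    use_xyz=False,
    pool_mod='max').cuda()

new_xyz, new_features, inds = sa_module(xyz, features)
print(new_features.shape)  # torch.Size([1, 48, 1]), as asserted in the test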
tests/test_models/test_common_modules/test_pointnet_ops.py  (+3, -10)

@@ -2,16 +2,9 @@
 import pytest
 import torch
 
-from mmdet3d.ops import (
-    ball_query,
-    furthest_point_sample,
-    furthest_point_sample_with_dist,
-    gather_points,
-    grouping_operation,
-    knn,
-    three_interpolate,
-    three_nn,
-)
+from mmdet3d.ops import (ball_query, furthest_point_sample,
+                         furthest_point_sample_with_dist, gather_points,
+                         grouping_operation, knn, three_interpolate, three_nn)
 
 
 def test_fps():
tests/test_models/test_common_modules/test_roiaware_pool3d.py  (+43, -19)

 # Copyright (c) OpenMMLab. All rights reserved.
 import numpy as np
 import pytest
 import torch
 
-from mmdet3d.ops.roiaware_pool3d import (RoIAwarePool3d, points_in_boxes_batch,
-                                         points_in_boxes_cpu,
-                                         points_in_boxes_gpu)
+from mmdet3d.ops.roiaware_pool3d import (RoIAwarePool3d, points_in_boxes_all,
+                                         points_in_boxes_cpu,
+                                         points_in_boxes_part)
 
 
 def test_RoIAwarePool3d():

@@ -16,8 +17,8 @@ def test_RoIAwarePool3d():
     roiaware_pool3d_avg = RoIAwarePool3d(
         out_size=4, max_pts_per_voxel=128, mode='avg')
-    rois = torch.tensor(
-        [[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 0.3],
-         [-10.0, 23.0, 16.0, 10, 20, 20, 0.5]],
+    rois = torch.tensor(
+        [[1.0, 2.0, 3.0, 5.0, 4.0, 6.0, -0.3 - np.pi / 2],
+         [-10.0, 23.0, 16.0, 20.0, 10.0, 20.0, -0.5 - np.pi / 2]],
         dtype=torch.float32).cuda(
         )  # boxes (m, 7) with bottom center in lidar coordinate
     pts = torch.tensor(

@@ -41,7 +42,7 @@ def test_RoIAwarePool3d():
         torch.tensor(49.750).cuda(), 1e-3)
 
 
-def test_points_in_boxes_gpu():
+def test_points_in_boxes_part():
     if not torch.cuda.is_available():
         pytest.skip('test requires GPU and torch+cuda')
     boxes = torch.tensor(

@@ -57,45 +58,68 @@ def test_points_in_boxes_gpu():
          [0, 0, 0], [6, 7, 8], [-2, -3, -4], [6, 4, 9]]],
         dtype=torch.float32).cuda()  # points (b, m, 3) in lidar coordinate
-    point_indices = points_in_boxes_gpu(points=pts, boxes=boxes)
+    point_indices = points_in_boxes_part(points=pts, boxes=boxes)
     expected_point_indices = torch.tensor(
         [[0, 0, 0, 0, 0, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1, -1]],
         dtype=torch.int32).cuda()
     assert point_indices.shape == torch.Size([2, 8])
     assert (point_indices == expected_point_indices).all()
 
+    boxes = torch.tensor([[[0.0, 0.0, 0.0, 1.0, 20.0, 1.0, 0.523598]]],
+                         dtype=torch.float32).cuda()  # 30 degrees
+    pts = torch.tensor(
+        [[[4, 6.928, 0], [6.928, 4, 0], [4, -6.928, 0], [6.928, -4, 0],
+          [-4, 6.928, 0], [-6.928, 4, 0], [-4, -6.928, 0], [-6.928, -4, 0]]],
+        dtype=torch.float32).cuda()
+    point_indices = points_in_boxes_part(points=pts, boxes=boxes)
+    expected_point_indices = torch.tensor([[-1, -1, 0, -1, 0, -1, -1, -1]],
+                                          dtype=torch.int32).cuda()
+    assert (point_indices == expected_point_indices).all()
+
     if torch.cuda.device_count() > 1:
         pts = pts.to('cuda:1')
         boxes = boxes.to('cuda:1')
         expected_point_indices = expected_point_indices.to('cuda:1')
-        point_indices = points_in_boxes_gpu(points=pts, boxes=boxes)
+        point_indices = points_in_boxes_part(points=pts, boxes=boxes)
         assert point_indices.shape == torch.Size([2, 8])
         assert (point_indices == expected_point_indices).all()
 
 
 def test_points_in_boxes_cpu():
-    boxes = torch.tensor(
-        [[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 0.3],
-         [-10.0, 23.0, 16.0, 10, 20, 20, 0.5]],
+    boxes = torch.tensor(
+        [[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 0.3],
+          [-10.0, 23.0, 16.0, 10, 20, 20, 0.5]]],
         dtype=torch.float32)  # boxes (m, 7) with bottom center in lidar coordinate
     pts = torch.tensor(
-        [[1, 2, 3.3], [1.2, 2.5, 3.0], [0.8, 2.1, 3.5], [1.6, 2.6, 3.6],
-         [0.8, 1.2, 3.9], [-9.2, 21.0, 18.2], [3.8, 7.9, 6.3],
-         [4.7, 3.5, -12.2], [3.8, 7.6, -2], [-10.6, -12.9, -20], [-16, -18, 9],
-         [-21.3, -52, -5], [0, 0, 0], [6, 7, 8], [-2, -3, -4]],
+        [[[1, 2, 3.3], [1.2, 2.5, 3.0], [0.8, 2.1, 3.5], [1.6, 2.6, 3.6],
+          [0.8, 1.2, 3.9], [-9.2, 21.0, 18.2], [3.8, 7.9, 6.3],
+          [4.7, 3.5, -12.2], [3.8, 7.6, -2], [-10.6, -12.9, -20],
+          [-16, -18, 9], [-21.3, -52, -5], [0, 0, 0], [6, 7, 8],
+          [-2, -3, -4]]],
         dtype=torch.float32)  # points (n, 3) in lidar coordinate
     point_indices = points_in_boxes_cpu(points=pts, boxes=boxes)
     expected_point_indices = torch.tensor(
-        [[1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
-         [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
+        [[[1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [0, 1], [0, 0], [0, 0],
+          [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]],
         dtype=torch.int32)
-    assert point_indices.shape == torch.Size([2, 15])
+    assert point_indices.shape == torch.Size([1, 15, 2])
     assert (point_indices == expected_point_indices).all()
 
+    boxes = torch.tensor([[[0.0, 0.0, 0.0, 1.0, 20.0, 1.0, 0.523598]]],
+                         dtype=torch.float32)  # 30 degrees
+    pts = torch.tensor(
+        [[[4, 6.928, 0], [6.928, 4, 0], [4, -6.928, 0], [6.928, -4, 0],
+          [-4, 6.928, 0], [-6.928, 4, 0], [-4, -6.928, 0], [-6.928, -4, 0]]],
+        dtype=torch.float32)
+    point_indices = points_in_boxes_cpu(points=pts, boxes=boxes)
+    expected_point_indices = torch.tensor(
+        [[[0], [0], [1], [0], [1], [0], [0], [0]]], dtype=torch.int32)
+    assert (point_indices == expected_point_indices).all()
+
 
-def test_points_in_boxes_batch():
+def test_points_in_boxes_all():
     if not torch.cuda.is_available():
         pytest.skip('test requires GPU and torch+cuda')

@@ -112,7 +136,7 @@ def test_points_in_boxes_batch():
          [-21.3, -52, -5], [0, 0, 0], [6, 7, 8], [-2, -3, -4]]],
         dtype=torch.float32).cuda()  # points (n, 3) in lidar coordinate
-    point_indices = points_in_boxes_batch(points=pts, boxes=boxes)
+    point_indices = points_in_boxes_all(points=pts, boxes=boxes)
     expected_point_indices = torch.tensor(
         [[[1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [0, 1], [0, 0], [0, 0],
           [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]],

@@ -124,6 +148,6 @@ def test_points_in_boxes_batch():
         pts = pts.to('cuda:1')
         boxes = boxes.to('cuda:1')
         expected_point_indices = expected_point_indices.to('cuda:1')
-        point_indices = points_in_boxes_batch(points=pts, boxes=boxes)
+        point_indices = points_in_boxes_all(points=pts, boxes=boxes)
         assert point_indices.shape == torch.Size([1, 15, 2])
         assert (point_indices == expected_point_indices).all()
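A hedged sketch of the renamed ops tested above (old names in comments), assuming a GPU build of mmdet3d at this commit; return shapes are read off the tests:

import torch
from mmdet3d.ops.roiaware_pool3d import (points_in_boxes_all,
                                         points_in_boxes_cpu,
                                         points_in_boxes_part)

boxes = torch.tensor([[[0.0, 0.0, 0.0, 1.0, 20.0, 1.0, 0.523598]]],
                     dtype=torch.float32).cuda()  # (B, M, 7) lidar boxes
pts = torch.rand(1, 8, 3).cuda()                  # (B, N, 3) points

idx_part = points_in_boxes_part(points=pts, boxes=boxes)  # was points_in_boxes_gpu
print(idx_part.shape)  # (B, N): index of one containing box, -1 if none

idx_all = points_in_boxes_all(points=pts, boxes=boxes)    # was points_in_boxes_batch
print(idx_all.shape)   # (B, N, M): 0/1 membership mask over all boxes

idx_cpu = points_in_boxes_cpu(points=pts.cpu(), boxes=boxes.cpu())
print(idx_cpu.shape)   # (B, N, M), now batched like points_in_boxes_all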
tests/test_models/test_detectors.py  (+102, -3)

 # Copyright (c) OpenMMLab. All rights reserved.
 import copy
+import random
+from os.path import dirname, exists, join
+
 import numpy as np
 import pytest
-import random
 import torch
-from os.path import dirname, exists, join
+
 from mmdet3d.core.bbox import (CameraInstance3DBoxes, DepthInstance3DBoxes,
                                LiDARInstance3DBoxes)

@@ -437,7 +438,8 @@ def test_imvoxelnet():
     if not torch.cuda.is_available():
         pytest.skip('test requires GPU and torch+cuda')
-    imvoxelnet_cfg = _get_detector_cfg('imvoxelnet/imvoxelnet_kitti-3d-car.py')
+    imvoxelnet_cfg = _get_detector_cfg(
+        'imvoxelnet/imvoxelnet_4x8_kitti-3d-car.py')
     self = build_detector(imvoxelnet_cfg).cuda()
     imgs = torch.rand([1, 3, 384, 1280], dtype=torch.float32).cuda()
     gt_bboxes_3d = [LiDARInstance3DBoxes(torch.rand([3, 7], device='cuda'))]

@@ -469,3 +471,100 @@ def test_imvoxelnet():
     assert boxes_3d.tensor.shape[1] == 7
     assert scores_3d.shape[0] >= 0
     assert labels_3d.shape[0] >= 0
+
+
+def test_pointrcnn():
+    if not torch.cuda.is_available():
+        pytest.skip('test requires GPU and torch+cuda')
+    pointrcnn_cfg = _get_detector_cfg(
+        'pointrcnn/pointrcnn_2x8_kitti-3d-3classes.py')
+    self = build_detector(pointrcnn_cfg).cuda()
+    points_0 = torch.rand([1000, 4], device='cuda')
+    points_1 = torch.rand([1000, 4], device='cuda')
+    points = [points_0, points_1]
+    img_meta_0 = dict(box_type_3d=LiDARInstance3DBoxes)
+    img_meta_1 = dict(box_type_3d=LiDARInstance3DBoxes)
+    img_metas = [img_meta_0, img_meta_1]
+    gt_bbox_0 = LiDARInstance3DBoxes(torch.rand([10, 7], device='cuda'))
+    gt_bbox_1 = LiDARInstance3DBoxes(torch.rand([10, 7], device='cuda'))
+    gt_bboxes = [gt_bbox_0, gt_bbox_1]
+    gt_labels_0 = torch.randint(0, 3, [10], device='cuda')
+    gt_labels_1 = torch.randint(0, 3, [10], device='cuda')
+    gt_labels = [gt_labels_0, gt_labels_1]
+
+    # test_forward_train
+    losses = self.forward_train(points, img_metas, gt_bboxes, gt_labels)
+    assert losses['bbox_loss'] >= 0
+    assert losses['semantic_loss'] >= 0
+    assert losses['loss_cls'] >= 0
+    assert losses['loss_bbox'] >= 0
+    assert losses['loss_corner'] >= 0
+
+
+def test_smoke():
+    if not torch.cuda.is_available():
+        pytest.skip('test requires GPU and torch+cuda')
+    _setup_seed(0)
+    smoke_cfg = _get_detector_cfg(
+        'smoke/smoke_dla34_pytorch_dlaneck_gn-all_8x4_6x_kitti-mono3d.py')
+    self = build_detector(smoke_cfg).cuda()
+    imgs = torch.rand([1, 3, 384, 1280], dtype=torch.float32).cuda()
+    gt_bboxes = [
+        torch.Tensor([[563.63122442, 175.02195182, 614.81298184, 224.97763099],
+                      [480.89676358, 179.86272635, 511.53017463, 202.54645962],
+                      [541.48322272, 175.73767011, 564.55208966, 193.95009791],
+                      [329.51448848, 176.14566789, 354.24670848,
+                       213.82599081]]).cuda()
+    ]
+    gt_bboxes_3d = [
+        CameraInstance3DBoxes(
+            torch.Tensor([[-0.69, 1.69, 25.01, 3.20, 1.61, 1.66, -1.59],
+                          [-7.43, 1.88, 47.55, 3.70, 1.40, 1.51, 1.55],
+                          [-4.71, 1.71, 60.52, 4.05, 1.46, 1.66, 1.56],
+                          [-12.63, 1.88, 34.09, 1.95, 1.72, 0.50,
+                           1.54]]).cuda(),
+            box_dim=7)
+    ]
+    gt_labels = [torch.tensor([0, 0, 0, 1]).cuda()]
+    gt_labels_3d = gt_labels
+    centers2d = [
+        torch.Tensor([[589.6528477, 198.3862263],
+                      [496.8143155, 190.75967182],
+                      [553.40528354, 184.53785991],
+                      [342.23690317, 194.44298819]]).cuda()
+    ]
+    # depths is actually not used in smoke head loss computation
+    depths = [torch.rand([3], dtype=torch.float32).cuda()]
+    attr_labels = None
+    img_metas = [
+        dict(
+            cam_intrinsic=[[721.5377, 0., 609.5593, 0.],
+                           [0., 721.5377, 172.854, 0.],
+                           [0., 0., 1., 0.], [0., 0., 0., 1.]],
+            scale_factor=np.array([1., 1., 1., 1.], dtype=np.float32),
+            pad_shape=[384, 1280],
+            trans_mat=np.array([[0.25, 0., 0.], [0., 0.25, 0], [0., 0., 1.]],
+                               dtype=np.float32),
+            affine_aug=False,
+            box_type_3d=CameraInstance3DBoxes)
+    ]
+
+    # test forward_train
+    losses = self.forward_train(imgs, img_metas, gt_bboxes, gt_labels,
+                                gt_bboxes_3d, gt_labels_3d, centers2d, depths,
+                                attr_labels)
+    assert losses['loss_cls'] >= 0
+    assert losses['loss_bbox'] >= 0
+
+    # test simple_test
+    with torch.no_grad():
+        results = self.simple_test(imgs, img_metas)
+    boxes_3d = results[0]['img_bbox']['boxes_3d']
+    scores_3d = results[0]['img_bbox']['scores_3d']
+    labels_3d = results[0]['img_bbox']['labels_3d']
+    assert boxes_3d.tensor.shape[0] >= 0
+    assert boxes_3d.tensor.shape[1] == 7
+    assert scores_3d.shape[0] >= 0
+    assert labels_3d.shape[0] >= 0
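The new PointRCNN test boils down to the standard config-driven build pattern. A hedged sketch of driving it outside the test harness, assuming an mmdetection3d checkout (the 'configs/' prefix is an assumption; the config name is from this diff) and a GPU:

import torch
from mmcv import Config
from mmdet3d.core.bbox import LiDARInstance3DBoxes
from mmdet3d.models import build_detector

cfg = Config.fromfile('configs/pointrcnn/pointrcnn_2x8_kitti-3d-3classes.py')
model = build_detector(cfg.model).cuda()

# Fake a 2-sample batch, mirroring the test inputs above.
points = [torch.rand([1000, 4], device='cuda') for _ in range(2)]
img_metas = [dict(box_type_3d=LiDARInstance3DBoxes) for _ in range(2)]
gt_bboxes = [LiDARInstance3DBoxes(torch.rand([10, 7], device='cuda'))
             for _ in range(2)]
gt_labels = [torch.randint(0, 3, [10], device='cuda') for _ in range(2)]

losses = model.forward_train(points, img_metas, gt_bboxes, gt_labels)
print(sorted(losses))  # expect bbox_loss, semantic_loss, loss_cls, ...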
tests/test_models/test_forward.py  (+3, -2)

@@ -6,9 +6,10 @@ CommandLine:
     xdoctest tests/test_models/test_forward.py zero
 """
 import copy
+from os.path import dirname, exists, join
+
 import numpy as np
 import torch
-from os.path import dirname, exists, join
 
 
 def _get_config_directory():

@@ -148,7 +149,7 @@ def _demo_mm_inputs(input_shape=(1, 3, 300, 300),
         input_shape (tuple):
             input batch dimensions
-        num_items (None | List[int]):
+        num_items (List[int]):
             specifies the number of boxes in each batch item
         num_classes (int):
tests/test_models/test_heads/test_dgcnn_decode_head.py  (new file, +68, -0)

# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
from mmcv.cnn.bricks import ConvModule

from mmdet3d.models.builder import build_head


def test_dgcnn_decode_head_loss():
    if not torch.cuda.is_available():
        pytest.skip('test requires GPU and torch+cuda')
    dgcnn_decode_head_cfg = dict(
        type='DGCNNHead',
        fp_channels=(1024, 512),
        channels=256,
        num_classes=13,
        dropout_ratio=0.5,
        conv_cfg=dict(type='Conv1d'),
        norm_cfg=dict(type='BN1d'),
        act_cfg=dict(type='LeakyReLU', negative_slope=0.2),
        loss_decode=dict(
            type='CrossEntropyLoss',
            use_sigmoid=False,
            class_weight=None,
            loss_weight=1.0),
        ignore_index=13)

    self = build_head(dgcnn_decode_head_cfg)
    self.cuda()
    assert isinstance(self.conv_seg, torch.nn.Conv1d)
    assert self.conv_seg.in_channels == 256
    assert self.conv_seg.out_channels == 13
    assert self.conv_seg.kernel_size == (1, )
    assert isinstance(self.pre_seg_conv, ConvModule)
    assert isinstance(self.pre_seg_conv.conv, torch.nn.Conv1d)
    assert self.pre_seg_conv.conv.in_channels == 512
    assert self.pre_seg_conv.conv.out_channels == 256
    assert self.pre_seg_conv.conv.kernel_size == (1, )
    assert isinstance(self.pre_seg_conv.bn, torch.nn.BatchNorm1d)
    assert self.pre_seg_conv.bn.num_features == 256

    # test forward
    fa_points = torch.rand(2, 4096, 1024).float().cuda()
    input_dict = dict(fa_points=fa_points)
    seg_logits = self(input_dict)
    assert seg_logits.shape == torch.Size([2, 13, 4096])

    # test loss
    pts_semantic_mask = torch.randint(0, 13, (2, 4096)).long().cuda()
    losses = self.losses(seg_logits, pts_semantic_mask)
    assert losses['loss_sem_seg'].item() > 0

    # test loss with ignore_index
    ignore_index_mask = torch.ones_like(pts_semantic_mask) * 13
    losses = self.losses(seg_logits, ignore_index_mask)
    assert losses['loss_sem_seg'].item() == 0

    # test loss with class_weight
    dgcnn_decode_head_cfg['loss_decode'] = dict(
        type='CrossEntropyLoss',
        use_sigmoid=False,
        class_weight=np.random.rand(13),
        loss_weight=1.0)
    self = build_head(dgcnn_decode_head_cfg)
    self.cuda()
    losses = self.losses(seg_logits, pts_semantic_mask)
    assert losses['loss_sem_seg'].item() > 0
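For consumers of this new head, a short hedged sketch of the forward contract (head config copied from the test above; GPU assumed): the head takes a dict carrying fa_points of shape (B, N, C) and returns per-point logits of shape (B, num_classes, N), so a hard prediction is an argmax over dim 1.

import torch
from mmdet3d.models.builder import build_head

head = build_head(
    dict(
        type='DGCNNHead',
        fp_channels=(1024, 512),
        channels=256,
        num_classes=13,
        dropout_ratio=0.5,
        conv_cfg=dict(type='Conv1d'),
        norm_cfg=dict(type='BN1d'),
        act_cfg=dict(type='LeakyReLU', negative_slope=0.2),
        loss_decode=dict(
            type='CrossEntropyLoss',
            use_sigmoid=False,
            class_weight=None,
            loss_weight=1.0),
        ignore_index=13)).cuda()

seg_logits = head(dict(fa_points=torch.rand(2, 4096, 1024).cuda()))
labels = seg_logits.argmax(dim=1)  # (2, 4096) per-point class ids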
tests/test_models/test_heads/test_heads.py  (+354, -11)

 # Copyright (c) OpenMMLab. All rights reserved.
 import copy
+import random
+from os.path import dirname, exists, join
+
 import mmcv
 import numpy as np
 import pytest
-import random
 import torch
-from os.path import dirname, exists, join
+
 from mmdet3d.core.bbox import (Box3DMode, CameraInstance3DBoxes,
                                DepthInstance3DBoxes, LiDARInstance3DBoxes)

@@ -116,6 +118,23 @@ def _get_pts_bbox_head_cfg(fname):
     return pts_bbox_head
 
 
+def _get_pointrcnn_rpn_head_cfg(fname):
+    """Grab configs necessary to create a rpn_head.
+
+    These are deep copied to allow for safe modification of parameters without
+    influencing other tests.
+    """
+    config = _get_config_module(fname)
+    model = copy.deepcopy(config.model)
+    train_cfg = mmcv.Config(copy.deepcopy(config.model.train_cfg))
+    test_cfg = mmcv.Config(copy.deepcopy(config.model.test_cfg))
+
+    rpn_head = model.rpn_head
+    rpn_head.update(train_cfg=train_cfg.rpn)
+    rpn_head.update(test_cfg=test_cfg.rpn)
+    return rpn_head, train_cfg.rpn.rpn_proposal
+
+
 def _get_vote_head_cfg(fname):
     """Grab configs necessary to create a vote_head.

@@ -147,6 +166,14 @@ def _get_parta2_bbox_head_cfg(fname):
     return vote_head
 
 
+def _get_pointrcnn_bbox_head_cfg(fname):
+    config = _get_config_module(fname)
+    model = copy.deepcopy(config.model)
+
+    vote_head = model.roi_head.bbox_head
+    return vote_head
+
+
 def test_anchor3d_head_loss():
     if not torch.cuda.is_available():
         pytest.skip('test requires GPU and torch+cuda')

@@ -263,6 +290,39 @@ def test_parta2_rpnhead_getboxes():
     assert result_list[0]['boxes_3d'].tensor.shape == torch.Size([512, 7])
 
 
+def test_pointrcnn_rpnhead_getboxes():
+    if not torch.cuda.is_available():
+        pytest.skip('test requires GPU and torch+cuda')
+    rpn_head_cfg, proposal_cfg = _get_pointrcnn_rpn_head_cfg(
+        './pointrcnn/pointrcnn_2x8_kitti-3d-3classes.py')
+    self = build_head(rpn_head_cfg)
+    self.cuda()
+
+    fp_features = torch.rand([2, 128, 1024], dtype=torch.float32).cuda()
+    feats = {'fp_features': fp_features}
+    # fake input_metas
+    input_metas = [{
+        'sample_idx': 1234,
+        'box_type_3d': LiDARInstance3DBoxes,
+        'box_mode_3d': Box3DMode.LIDAR
+    }, {
+        'sample_idx': 2345,
+        'box_type_3d': LiDARInstance3DBoxes,
+        'box_mode_3d': Box3DMode.LIDAR
+    }]
+    (bbox_preds, cls_preds) = self.forward(feats)
+    assert bbox_preds.shape == (2, 1024, 8)
+    assert cls_preds.shape == (2, 1024, 3)
+    points = torch.rand([2, 1024, 3], dtype=torch.float32).cuda()
+    result_list = self.get_bboxes(points, bbox_preds, cls_preds, input_metas)
+    max_num = proposal_cfg.max_num
+    bbox, score_selected, labels, cls_preds_selected = result_list[0]
+    assert bbox.tensor.shape == (max_num, 7)
+    assert score_selected.shape == (max_num, )
+    assert labels.shape == (max_num, )
+    assert cls_preds_selected.shape == (max_num, 3)
+
+
 def test_vote_head():
     if not torch.cuda.is_available():
         pytest.skip('test requires GPU and torch+cuda')

@@ -358,6 +418,102 @@ def test_vote_head():
     assert results[0][2].shape[0] >= 0
 
 
+def test_smoke_mono3d_head():
+    head_cfg = dict(
+        type='SMOKEMono3DHead',
+        num_classes=3,
+        in_channels=64,
+        dim_channel=[3, 4, 5],
+        ori_channel=[6, 7],
+        stacked_convs=0,
+        feat_channels=64,
+        use_direction_classifier=False,
+        diff_rad_by_sin=False,
+        pred_attrs=False,
+        pred_velo=False,
+        dir_offset=0,
+        strides=None,
+        group_reg_dims=(8, ),
+        cls_branch=(256, ),
+        reg_branch=((256, ), ),
+        num_attrs=0,
+        bbox_code_size=7,
+        dir_branch=(),
+        attr_branch=(),
+        bbox_coder=dict(
+            type='SMOKECoder',
+            base_depth=(28.01, 16.32),
+            base_dims=((0.88, 1.73, 0.67), (1.78, 1.70, 0.58),
+                       (3.88, 1.63, 1.53)),
+            code_size=7),
+        loss_cls=dict(type='GaussianFocalLoss', loss_weight=1.0),
+        loss_bbox=dict(type='L1Loss', reduction='sum', loss_weight=1 / 300),
+        loss_dir=dict(
+            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+        loss_attr=None,
+        conv_bias=True,
+        dcn_on_last_conv=False)
+
+    self = build_head(head_cfg)
+
+    feats = [torch.rand([2, 64, 32, 32], dtype=torch.float32)]
+
+    # test forward
+    ret_dict = self(feats)
+    assert len(ret_dict) == 2
+    assert len(ret_dict[0]) == 1
+    assert ret_dict[0][0].shape == torch.Size([2, 3, 32, 32])
+    assert ret_dict[1][0].shape == torch.Size([2, 8, 32, 32])
+
+    # test loss
+    gt_bboxes = [
+        torch.Tensor([[1.0, 2.0, 20.0, 40.0], [45.0, 50.0, 80.0, 70.1],
+                      [34.0, 39.0, 65.0, 64.0]]),
+        torch.Tensor([[11.0, 22.0, 29.0, 31.0], [41.0, 55.0, 60.0, 99.0],
+                      [29.0, 29.0, 65.0, 56.0]])
+    ]
+    gt_bboxes_3d = [
+        CameraInstance3DBoxes(torch.rand([3, 7]), box_dim=7),
+        CameraInstance3DBoxes(torch.rand([3, 7]), box_dim=7)
+    ]
+    gt_labels = [torch.randint(0, 3, [3]) for i in range(2)]
+    gt_labels_3d = gt_labels
+    centers2d = [torch.randint(0, 60, (3, 2)), torch.randint(0, 40, (3, 2))]
+    depths = [
+        torch.rand([3], dtype=torch.float32),
+        torch.rand([3], dtype=torch.float32)
+    ]
+    attr_labels = None
+    img_metas = [
+        dict(
+            cam2img=[[1260.8474446004698, 0.0, 807.968244525554, 40.1111],
+                     [0.0, 1260.8474446004698, 495.3344268742088, 2.34422],
+                     [0.0, 0.0, 1.0, 0.00333333], [0.0, 0.0, 0.0, 1.0]],
+            scale_factor=np.array([1., 1., 1., 1.], dtype=np.float32),
+            pad_shape=[128, 128],
+            trans_mat=np.array([[0.25, 0., 0.], [0., 0.25, 0], [0., 0., 1.]],
+                               dtype=np.float32),
+            affine_aug=False,
+            box_type_3d=CameraInstance3DBoxes) for i in range(2)
+    ]
+    losses = self.loss(*ret_dict, gt_bboxes, gt_labels, gt_bboxes_3d,
+                       gt_labels_3d, centers2d, depths, attr_labels, img_metas)
+    assert losses['loss_cls'] >= 0
+    assert losses['loss_bbox'] >= 0
+
+    # test get_boxes
+    results = self.get_bboxes(*ret_dict, img_metas)
+    assert len(results) == 2
+    assert len(results[0]) == 4
+    assert results[0][0].tensor.shape == torch.Size([100, 7])
+    assert results[0][1].shape == torch.Size([100])
+    assert results[0][2].shape == torch.Size([100])
+    assert results[0][3] is None
+
+
 def test_parta2_bbox_head():
     parta2_bbox_head_cfg = _get_parta2_bbox_head_cfg(
         './parta2/hv_PartA2_secfpn_2x8_cyclic_80e_kitti-3d-3class.py')

@@ -370,6 +526,18 @@ def test_parta2_bbox_head():
     assert bbox_pred.shape == (256, 7)
 
 
+def test_pointrcnn_bbox_head():
+    if not torch.cuda.is_available():
+        pytest.skip('test requires GPU and torch+cuda')
+    pointrcnn_bbox_head_cfg = _get_pointrcnn_bbox_head_cfg(
+        './pointrcnn/pointrcnn_2x8_kitti-3d-3classes.py')
+    self = build_head(pointrcnn_bbox_head_cfg).cuda()
+    feats = torch.rand([100, 512, 133]).cuda()
+    rcnn_cls, rcnn_reg = self.forward(feats)
+    assert rcnn_cls.shape == (100, 1)
+    assert rcnn_reg.shape == (100, 7)
+
+
 def test_part_aggregation_ROI_head():
     if not torch.cuda.is_available():
         pytest.skip('test requires GPU and torch+cuda')

@@ -444,6 +612,50 @@ def test_part_aggregation_ROI_head():
     assert labels_3d.shape == (12, )
 
 
+def test_pointrcnn_roi_head():
+    if not torch.cuda.is_available():
+        pytest.skip('test requires GPU and torch+cuda')
+
+    roi_head_cfg = _get_roi_head_cfg(
+        './pointrcnn/pointrcnn_2x8_kitti-3d-3classes.py')
+    self = build_head(roi_head_cfg).cuda()
+
+    features = torch.rand([3, 128, 16384]).cuda()
+    points = torch.rand([3, 16384, 3]).cuda()
+    points_cls_preds = torch.rand([3, 16384, 3]).cuda()
+    rcnn_feats = {
+        'features': features,
+        'points': points,
+        'points_cls_preds': points_cls_preds
+    }
+    boxes_3d = LiDARInstance3DBoxes(torch.rand(50, 7).cuda())
+    labels_3d = torch.randint(low=0, high=2, size=[50]).cuda()
+    proposal = {'boxes_3d': boxes_3d, 'labels_3d': labels_3d}
+    proposal_list = [proposal for i in range(3)]
+    gt_bboxes_3d = [
+        LiDARInstance3DBoxes(torch.rand([5, 7], device='cuda'))
+        for i in range(3)
+    ]
+    gt_labels_3d = [torch.randint(0, 2, [5], device='cuda') for i in range(3)]
+    box_type_3d = LiDARInstance3DBoxes
+    img_metas = [dict(box_type_3d=box_type_3d) for i in range(3)]
+
+    losses = self.forward_train(rcnn_feats, img_metas, proposal_list,
+                                gt_bboxes_3d, gt_labels_3d)
+    assert losses['loss_cls'] >= 0
+    assert losses['loss_bbox'] >= 0
+    assert losses['loss_corner'] >= 0
+
+    bbox_results = self.simple_test(rcnn_feats, img_metas, proposal_list)
+    boxes_3d = bbox_results[0]['boxes_3d']
+    scores_3d = bbox_results[0]['scores_3d']
+    labels_3d = bbox_results[0]['labels_3d']
+    assert boxes_3d.tensor.shape[1] == 7
+    assert boxes_3d.tensor.shape[0] == scores_3d.shape[0]
+    assert scores_3d.shape[0] == labels_3d.shape[0]
+
+
 def test_free_anchor_3D_head():
     if not torch.cuda.is_available():
         pytest.skip('test requires GPU and torch+cuda')

@@ -604,7 +816,7 @@ def test_h3d_head():
     h3d_head_cfg.bbox_head.num_proposal = num_proposal
     self = build_head(h3d_head_cfg).cuda()
 
-    # prepare roi outputs
+    # prepare RoI outputs
     fp_xyz = [torch.rand([1, num_point, 3], dtype=torch.float32).cuda()]
     hd_features = torch.rand([1, 256, num_point], dtype=torch.float32).cuda()
     fp_indices = [torch.randint(0, 128, [1, num_point]).cuda()]

@@ -1144,7 +1356,7 @@ def test_groupfree3d_head():
     assert ret_dict['s5.sem_scores'].shape == torch.Size([2, 256, 18])
 
     # test losses
-    points = [torch.rand([50000, 4], device='cuda') for i in range(2)]
+    points = [torch.rand([5000, 4], device='cuda') for i in range(2)]
     gt_bbox1 = torch.rand([10, 7], dtype=torch.float32).cuda()
     gt_bbox2 = torch.rand([10, 7], dtype=torch.float32).cuda()

@@ -1152,12 +1364,12 @@ def test_groupfree3d_head():
     gt_bbox2 = DepthInstance3DBoxes(gt_bbox2)
     gt_bboxes = [gt_bbox1, gt_bbox2]
 
-    pts_instance_mask_1 = torch.randint(0, 10, [50000], device='cuda')
-    pts_instance_mask_2 = torch.randint(0, 10, [50000], device='cuda')
+    pts_instance_mask_1 = torch.randint(0, 10, [5000], device='cuda')
+    pts_instance_mask_2 = torch.randint(0, 10, [5000], device='cuda')
     pts_instance_mask = [pts_instance_mask_1, pts_instance_mask_2]
 
-    pts_semantic_mask_1 = torch.randint(0, 19, [50000], device='cuda')
-    pts_semantic_mask_2 = torch.randint(0, 19, [50000], device='cuda')
+    pts_semantic_mask_1 = torch.randint(0, 19, [5000], device='cuda')
+    pts_semantic_mask_2 = torch.randint(0, 19, [5000], device='cuda')
     pts_semantic_mask = [pts_semantic_mask_1, pts_semantic_mask_2]
 
     labels_1 = torch.randint(0, 18, [10], device='cuda')

@@ -1178,7 +1390,7 @@ def test_groupfree3d_head():
     # test multiclass_nms_single
     obj_scores = torch.rand([256], device='cuda')
     sem_scores = torch.rand([256, 18], device='cuda')
-    points = torch.rand([50000, 3], device='cuda')
+    points = torch.rand([5000, 3], device='cuda')
     bbox = torch.rand([256, 7], device='cuda')
     input_meta = dict(box_type_3d=DepthInstance3DBoxes)
     bbox_selected, score_selected, labels = \

@@ -1193,9 +1405,9 @@ def test_groupfree3d_head():
     assert labels.shape[0] >= 0
 
     # test get_boxes
-    points = torch.rand([1, 50000, 3], device='cuda')
+    points = torch.rand([1, 5000, 3], device='cuda')
     seed_points = torch.rand([1, 1024, 3], device='cuda')
-    seed_indices = torch.randint(0, 50000, [1, 1024], device='cuda')
+    seed_indices = torch.randint(0, 5000, [1, 1024], device='cuda')
     obj_scores = torch.rand([1, 256, 1], device='cuda')
     center = torch.rand([1, 256, 3], device='cuda')
     dir_class = torch.rand([1, 256, 1], device='cuda')

@@ -1222,3 +1434,134 @@ def test_groupfree3d_head():
     assert results[0][0].tensor.shape[1] == 7
     assert results[0][1].shape[0] >= 0
     assert results[0][2].shape[0] >= 0
+
+
+def test_pgd_head():
+    if not torch.cuda.is_available():
+        pytest.skip('test requires GPU and torch+cuda')
+    _setup_seed(0)
+    pgd_head_cfg = _get_head_cfg(
+        'pgd/pgd_r101_caffe_fpn_gn-head_3x4_4x_kitti-mono3d.py')
+    self = build_head(pgd_head_cfg).cuda()
+
+    feats = [
+        torch.rand([2, 256, 96, 312], dtype=torch.float32).cuda(),
+        torch.rand([2, 256, 48, 156], dtype=torch.float32).cuda(),
+        torch.rand([2, 256, 24, 78], dtype=torch.float32).cuda(),
+        torch.rand([2, 256, 12, 39], dtype=torch.float32).cuda(),
+    ]
+
+    # test forward
+    ret_dict = self(feats)
+    assert len(ret_dict) == 7
+    assert len(ret_dict[0]) == 4
+    assert ret_dict[0][0].shape == torch.Size([2, 3, 96, 312])
+
+    # test loss
+    gt_bboxes = [
+        torch.rand([3, 4], dtype=torch.float32).cuda(),
+        torch.rand([3, 4], dtype=torch.float32).cuda()
+    ]
+    gt_bboxes_3d = CameraInstance3DBoxes(
+        torch.rand([3, 7], device='cuda'), box_dim=7)
+    gt_labels = [torch.randint(0, 3, [3], device='cuda') for i in range(2)]
+    gt_labels_3d = gt_labels
+    centers2d = [
+        torch.rand([3, 2], dtype=torch.float32).cuda(),
+        torch.rand([3, 2], dtype=torch.float32).cuda()
+    ]
+    depths = [
+        torch.rand([3], dtype=torch.float32).cuda(),
+        torch.rand([3], dtype=torch.float32).cuda()
+    ]
+    attr_labels = None
+    img_metas = [
+        dict(
+            img_shape=[384, 1248],
+            cam2img=[[721.5377, 0.0, 609.5593, 44.85728],
+                     [0.0, 721.5377, 172.854, 0.2163791],
+                     [0.0, 0.0, 1.0, 0.002745884], [0.0, 0.0, 0.0, 1.0]],
+            scale_factor=np.array([1., 1., 1., 1.], dtype=np.float32),
+            box_type_3d=CameraInstance3DBoxes) for i in range(2)
+    ]
+    losses = self.loss(*ret_dict, gt_bboxes, gt_labels, gt_bboxes_3d,
+                       gt_labels_3d, centers2d, depths, attr_labels, img_metas)
+    assert losses['loss_cls'] >= 0
+    assert losses['loss_offset'] >= 0
+    assert losses['loss_depth'] >= 0
+    assert losses['loss_size'] >= 0
+    assert losses['loss_rotsin'] >= 0
+    assert losses['loss_centerness'] >= 0
+    assert losses['loss_kpts'] >= 0
+    assert losses['loss_bbox2d'] >= 0
+    assert losses['loss_consistency'] >= 0
+    assert losses['loss_dir'] >= 0
+
+    # test get_boxes
+    results = self.get_bboxes(*ret_dict, img_metas)
+    assert len(results) == 2
+    assert len(results[0]) == 5
+    assert results[0][0].tensor.shape == torch.Size([20, 7])
+    assert results[0][1].shape == torch.Size([20])
+    assert results[0][2].shape == torch.Size([20])
+    assert results[0][3] is None
+    assert results[0][4].shape == torch.Size([20, 5])
+
+
+def test_monoflex_head():
+    head_cfg = dict(
+        type='MonoFlexHead',
+        num_classes=3,
+        in_channels=64,
+        use_edge_fusion=True,
+        edge_fusion_inds=[(1, 0)],
+        edge_heatmap_ratio=1 / 8,
+        stacked_convs=0,
+        feat_channels=64,
+        use_direction_classifier=False,
+        diff_rad_by_sin=False,
+        pred_attrs=False,
+        pred_velo=False,
+        dir_offset=0,
+        strides=None,
+        group_reg_dims=((4, ), (2, ), (20, ), (3, ), (3, ), (8, 8), (1, ),
+                        (1, )),
+        cls_branch=(256, ),
+        reg_branch=((256, ), (256, ), (256, ), (256, ), (256, ), (256, ),
+                    (256, ), (256, )),
+        num_attrs=0,
+        bbox_code_size=7,
+        dir_branch=(),
+        attr_branch=(),
+        bbox_coder=dict(
+            type='MonoFlexCoder',
+            depth_mode='exp',
+            base_depth=(26.494627, 16.05988),
+            depth_range=[0.1, 100],
+            combine_depth=True,
+            uncertainty_range=[-10, 10],
+            base_dims=((3.8840, 1.5261, 1.6286, 0.4259, 0.1367, 0.1022),
+                       (0.8423, 1.7607, 0.6602, 0.2349, 0.1133, 0.1427),
+                       (1.7635, 1.7372, 0.5968, 0.1766, 0.0948, 0.1242)),
+            dims_mode='linear',
+            multibin=True,
+            num_dir_bins=4,
+            bin_centers=[0, np.pi / 2, np.pi, -np.pi / 2],
+            bin_margin=np.pi / 6,
+            code_size=7),
+        conv_bias=True,
+        dcn_on_last_conv=False)
+
+    self = build_head(head_cfg)
+
+    feats = [torch.rand([2, 64, 32, 32], dtype=torch.float32)]
+    input_metas = [
+        dict(img_shape=(110, 110), pad_shape=(128, 128)),
+        dict(img_shape=(98, 110), pad_shape=(128, 128))
+    ]
+    cls_score, out_reg = self(feats, input_metas)
+    assert cls_score[0].shape == torch.Size([2, 3, 32, 32])
+    assert out_reg[0].shape == torch.Size([2, 50, 32, 32])
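The _get_pointrcnn_rpn_head_cfg helper added above follows a pattern worth spelling out: heads are built from the model config with the relevant train/test sub-configs injected. A hedged sketch, assuming an mmdetection3d checkout with mmcv installed (the 'configs/' prefix is an assumption; the config name is from this diff):

import copy

import mmcv

from mmdet3d.models.builder import build_head

config = mmcv.Config.fromfile(
    'configs/pointrcnn/pointrcnn_2x8_kitti-3d-3classes.py')
rpn_head_cfg = copy.deepcopy(config.model.rpn_head)
# Inject the RPN-specific train/test settings so the head is self-contained.
rpn_head_cfg.update(
    train_cfg=mmcv.Config(copy.deepcopy(config.model.train_cfg)).rpn,
    test_cfg=mmcv.Config(copy.deepcopy(config.model.test_cfg)).rpn)
rpn_head = build_head(rpn_head_cfg)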
tests/test_models/test_heads/test_parta2_bbox_head.py  (+15, -16)

@@ -76,7 +76,7 @@ def test_loss():
         2.0579e-02, 1.5005e-04, 3.5252e-05, 0.0000e+00, 2.0433e-05, 1.5422e-05
     ])
     expected_loss_bbox = torch.as_tensor(0.0622)
-    expected_loss_corner = torch.Tensor([0.1379])
+    expected_loss_corner = torch.Tensor([0.1374])
     assert torch.allclose(loss['loss_cls'], expected_loss_cls, 1e-3)
     assert torch.allclose(loss['loss_bbox'], expected_loss_bbox, 1e-3)

@@ -201,7 +201,7 @@ def test_get_targets():
     ])
     expected_bbox_targets = torch.Tensor(
-        [[0.0805, 0.0130, 0.0047, 0.0542, -0.2252, 0.0299, -0.1495]])
+        [[-0.0632, 0.0516, 0.0047, 0.0542, -0.2252, 0.0299, -0.1495]])
     expected_pos_gt_bboxes = torch.Tensor(
         [[7.8417, -0.1405, -1.9652, 1.6122, 3.2838, 1.5331, -2.0835]])

@@ -345,12 +345,11 @@ def test_get_bboxes():
     selected_bboxes, selected_scores, selected_label_preds = result_list[0]
     expected_selected_bboxes = torch.Tensor(
-        [[56.2170, 25.9074, -1.3610, 1.6025, 3.6730, 1.5128, -0.1179],
-         [54.6521, 28.8846, -1.9145, 1.6362, 4.0573, 1.5599, -1.7335],
-         [31.6179, -5.6004, -1.2470, 1.6458, 4.1622, 1.5632, -1.5734]]).cuda()
+        [[56.0888, 25.6445, -1.3610, 1.6025, 3.6730, 1.5128, -0.1179],
+         [54.4606, 29.2412, -1.9145, 1.6362, 4.0573, 1.5599, -1.7335],
+         [31.8887, -5.8574, -1.2470, 1.6458, 4.1622, 1.5632, -1.5734]]).cuda()
     expected_selected_scores = torch.Tensor([-2.2061, -2.1121, -0.1761]).cuda()
     expected_selected_label_preds = torch.Tensor([2., 2., 2.]).cuda()
     assert torch.allclose(selected_bboxes.tensor, expected_selected_bboxes, 1e-3)
     assert torch.allclose(selected_scores, expected_selected_scores, 1e-3)

@@ -387,43 +386,43 @@ def test_multi_class_nms():
     box_preds = torch.Tensor(
         [[5.6217e+01, 2.5908e+01, -1.3611e+00, 1.6025e+00, 3.6730e+00,
-          1.5129e+00, -1.1786e-01],
+          1.5129e+00, 1.1786e-01],
          [5.4653e+01, 2.8885e+01, -1.9145e+00, 1.6362e+00, 4.0574e+00,
-          1.5599e+00, -1.7335e+00],
+          1.5599e+00, 1.7335e+00],
          [5.5809e+01, 2.5686e+01, -1.4457e+00, 1.5939e+00, 3.8270e+00,
-          1.4997e+00, -2.9191e+00],
+          1.4997e+00, 2.9191e+00],
          [5.6107e+01, 2.6082e+01, -1.3557e+00, 1.5782e+00, 3.7444e+00,
-          1.5266e+00, 1.7707e-01],
+          1.5266e+00, -1.7707e-01],
          [3.1618e+01, -5.6004e+00, -1.2470e+00, 1.6459e+00, 4.1622e+00,
-          1.5632e+00, -1.5734e+00],
+          1.5632e+00, 1.5734e+00],
          [3.1605e+01, -5.6342e+00, -1.2467e+00, 1.6474e+00, 4.1519e+00,
-          1.5481e+00, -1.6313e+00],
+          1.5481e+00, 1.6313e+00],
          [5.6211e+01, 2.7294e+01, -1.5350e+00, 1.5422e+00, 3.7733e+00,
-          1.5140e+00, 9.5846e-02],
+          1.5140e+00, -9.5846e-02],
          [5.5907e+01, 2.7155e+01, -1.4712e+00, 1.5416e+00, 3.7611e+00,
-          1.5142e+00, -5.2059e-02],
+          1.5142e+00, 5.2059e-02],
          [5.4000e+01, 3.0585e+01, -1.6874e+00, 1.6495e+00, 4.0376e+00,
-          1.5554e+00, -1.7900e+00],
+          1.5554e+00, 1.7900e+00],
          [5.6007e+01, 2.6300e+01, -1.3945e+00, 1.5716e+00, 3.7064e+00,
-          1.4715e+00, -2.9639e+00]]).cuda()
+          1.4715e+00, 2.9639e+00]]).cuda()
     input_meta = dict(
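Note that in test_multi_class_nms the inputs change only in the sign of the yaw component, which reads (hedged) as part of the v1.0.0rc0 coordinate-system refactor rather than a numerical retune. A tiny pure-PyTorch illustration of the relation between the two conventions:

import torch

old_yaw = torch.tensor([-1.1786e-01, -1.7335e+00, 1.7707e-01])
new_yaw = -old_yaw  # the refactored convention negates the rotation angle
assert torch.allclose(
    new_yaw, torch.tensor([1.1786e-01, 1.7335e+00, -1.7707e-01]))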
tests/test_models/test_heads/test_roi_extractors.py  (+29, -3)

 # Copyright (c) OpenMMLab. All rights reserved.
+import numpy as np
 import pytest
 import torch
 
-from mmdet3d.models.roi_heads.roi_extractors import Single3DRoIAwareExtractor
+from mmdet3d.models.roi_heads.roi_extractors import (
+    Single3DRoIAwareExtractor, Single3DRoIPointExtractor)
 
 
 def test_single_roiaware_extractor():

@@ -21,11 +23,35 @@ def test_single_roiaware_extractor():
         dtype=torch.float32).cuda()
     coordinate = feats.clone()
     batch_inds = torch.zeros(feats.shape[0]).cuda()
-    rois = torch.tensor([[0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 0.3],
-                         [0, -10.0, 23.0, 16.0, 10, 20, 20, 0.5]],
+    rois = torch.tensor([[0, 1.0, 2.0, 3.0, 5.0, 4.0, 6.0, -0.3 - np.pi / 2],
+                         [0, -10.0, 23.0, 16.0, 20, 10, 20, -0.5 - np.pi / 2]],
                         dtype=torch.float32).cuda()
     # test forward
     pooled_feats = self(feats, coordinate, batch_inds, rois)
     assert pooled_feats.shape == torch.Size([2, 4, 4, 4, 3])
     assert torch.allclose(pooled_feats.sum(),
                           torch.tensor(51.100).cuda(), 1e-3)
+
+
+def test_single_roipoint_extractor():
+    if not torch.cuda.is_available():
+        pytest.skip('test requires GPU and torch+cuda')
+
+    roi_layer_cfg = dict(
+        type='RoIPointPool3d', num_sampled_points=512, pool_extra_width=0)
+
+    self = Single3DRoIPointExtractor(roi_layer=roi_layer_cfg)
+
+    feats = torch.tensor(
+        [[1, 2, 3.3], [1.2, 2.5, 3.0], [0.8, 2.1, 3.5], [1.6, 2.6, 3.6],
+         [0.8, 1.2, 3.9], [-9.2, 21.0, 18.2], [3.8, 7.9, 6.3],
+         [4.7, 3.5, -12.2], [3.8, 7.6, -2], [-10.6, -12.9, -20], [-16, -18, 9],
+         [-21.3, -52, -5], [0, 0, 0], [6, 7, 8], [-2, -3, -4]],
+        dtype=torch.float32).unsqueeze(0).cuda()
+    points = feats.clone()
+    batch_inds = feats.shape[0]
+    rois = torch.tensor([[0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 0.3],
+                         [0, -10.0, 23.0, 16.0, 10, 20, 20, 0.5]],
+                        dtype=torch.float32).cuda()
+    pooled_feats = self(feats, points, batch_inds, rois)
+    assert pooled_feats.shape == torch.Size([2, 512, 6])
tests/test_models/test_heads/test_semantic_heads.py  (+2, -2)

@@ -53,11 +53,11 @@ def test_PointwiseSemanticHead():
     gt_bboxes = [
         LiDARInstance3DBoxes(
             torch.tensor(
-                [[6.4118, -3.4305, -1.7291, 1.7033, 3.4693, 1.6197, -0.9091]],
+                [[6.4118, -3.4305, -1.7291, 1.7033, 3.4693, 1.6197, 0.9091]],
                 dtype=torch.float32).cuda()),
         LiDARInstance3DBoxes(
             torch.tensor(
-                [[16.9107, 9.7925, -1.9201, 1.6097, 3.2786, 1.5307, -2.4056]],
+                [[16.9107, 9.7925, -1.9201, 1.6097, 3.2786, 1.5307, 2.4056]],
                 dtype=torch.float32).cuda())
     ]
     # batch size is 2 in the unit test
tests/test_models/test_necks/test_necks.py  (+75, -0)

@@ -57,3 +57,78 @@ def test_imvoxel_neck():
     inputs = torch.rand([1, 64, 216, 248, 12], device='cuda')
     outputs = neck(inputs)
     assert outputs[0].shape == (1, 256, 248, 216)
+
+
+def test_fp_neck():
+    if not torch.cuda.is_available():
+        pytest.skip()
+
+    xyzs = [16384, 4096, 1024, 256, 64]
+    feat_channels = [1, 96, 256, 512, 1024]
+    channel_num = 5
+
+    sa_xyz = [torch.rand(3, xyzs[i], 3) for i in range(channel_num)]
+    sa_features = [
+        torch.rand(3, feat_channels[i], xyzs[i]) for i in range(channel_num)
+    ]
+
+    neck_cfg = dict(
+        type='PointNetFPNeck',
+        fp_channels=((1536, 512, 512), (768, 512, 512), (608, 256, 256),
+                     (257, 128, 128)))
+
+    neck = build_neck(neck_cfg)
+    neck.init_weights()
+
+    if torch.cuda.is_available():
+        sa_xyz = [x.cuda() for x in sa_xyz]
+        sa_features = [x.cuda() for x in sa_features]
+        neck.cuda()
+
+    feats_sa = {'sa_xyz': sa_xyz, 'sa_features': sa_features}
+    outputs = neck(feats_sa)
+    assert outputs['fp_xyz'].cpu().numpy().shape == (3, 16384, 3)
+    assert outputs['fp_features'].detach().cpu().numpy().shape == (3, 128,
+                                                                   16384)
+
+
+def test_dla_neck():
+
+    s = 32
+    in_channels = [16, 32, 64, 128, 256, 512]
+    feat_sizes = [s // 2**i for i in range(6)]  # [32, 16, 8, 4, 2, 1]
+
+    if torch.cuda.is_available():
+        # Test DLA Neck with DCNv2 on GPU
+        neck_cfg = dict(
+            type='DLANeck',
+            in_channels=[16, 32, 64, 128, 256, 512],
+            start_level=2,
+            end_level=5,
+            norm_cfg=dict(type='GN', num_groups=32))
+        neck = build_neck(neck_cfg)
+        neck.init_weights()
+        neck.cuda()
+        feats = [
+            torch.rand(4, in_channels[i], feat_sizes[i], feat_sizes[i]).cuda()
+            for i in range(len(in_channels))
+        ]
+        outputs = neck(feats)
+        assert outputs.shape == (4, 64, 8, 8)
+    else:
+        # Test DLA Neck without DCNv2 on CPU
+        neck_cfg = dict(
+            type='DLANeck',
+            in_channels=[16, 32, 64, 128, 256, 512],
+            start_level=2,
+            end_level=5,
+            norm_cfg=dict(type='GN', num_groups=32),
+            use_dcn=False)
+        neck = build_neck(neck_cfg)
+        neck.init_weights()
+        feats = [
+            torch.rand(4, in_channels[i], feat_sizes[i], feat_sizes[i])
+            for i in range(len(in_channels))
+        ]
+        outputs = neck(feats)
+        assert outputs[0].shape == (4, 64, 8, 8)
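The use_dcn=False branch above is the one path here that needs no GPU, so it doubles as a runnable sketch of the new DLANeck (config copied from the test; the print line is illustrative):

import torch
from mmdet3d.models.builder import build_neck

neck = build_neck(
    dict(
        type='DLANeck',
        in_channels=[16, 32, 64, 128, 256, 512],
        start_level=2,
        end_level=5,
        norm_cfg=dict(type='GN', num_groups=32),
        use_dcn=False))  # plain convs instead of DCNv2, so CPU works
neck.init_weights()

# Six pyramid levels, spatial size halving each level: 32, 16, ..., 1.
feats = [
    torch.rand(4, c, 32 // 2**i, 32 // 2**i)
    for i, c in enumerate([16, 32, 64, 128, 256, 512])
]
out = neck(feats)
print(out[0].shape)  # (4, 64, 8, 8) per the test above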
tests/test_models/test_segmentors.py  (+47, -1)

 # Copyright (c) OpenMMLab. All rights reserved.
 import copy
+from os.path import dirname, exists, join
+
 import numpy as np
 import pytest
 import torch
-from os.path import dirname, exists, join
 
 from mmdet3d.models.builder import build_segmentor
 from mmdet.apis import set_random_seed

@@ -304,3 +305,48 @@ def test_paconv_cuda_ssg():
     results = self.forward(return_loss=False, **data_dict)
     assert results[0]['semantic_mask'].shape == torch.Size([200])
     assert results[1]['semantic_mask'].shape == torch.Size([100])
+
+
+def test_dgcnn():
+    if not torch.cuda.is_available():
+        pytest.skip('test requires GPU and torch+cuda')
+
+    set_random_seed(0, True)
+    dgcnn_cfg = _get_segmentor_cfg(
+        'dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class.py')
+    dgcnn_cfg.test_cfg.num_points = 32
+    self = build_segmentor(dgcnn_cfg).cuda()
+    points = [torch.rand(4096, 9).float().cuda() for _ in range(2)]
+    img_metas = [dict(), dict()]
+    gt_masks = [torch.randint(0, 13, (4096, )).long().cuda() for _ in range(2)]
+
+    # test forward_train
+    losses = self.forward_train(points, img_metas, gt_masks)
+    assert losses['decode.loss_sem_seg'].item() >= 0
+
+    # test loss with ignore_index
+    ignore_masks = [torch.ones_like(gt_masks[0]) * 13 for _ in range(2)]
+    losses = self.forward_train(points, img_metas, ignore_masks)
+    assert losses['decode.loss_sem_seg'].item() == 0
+
+    # test simple_test
+    self.eval()
+    with torch.no_grad():
+        scene_points = [
+            torch.randn(500, 6).float().cuda() * 3.0,
+            torch.randn(200, 6).float().cuda() * 2.5
+        ]
+        results = self.simple_test(scene_points, img_metas)
+        assert results[0]['semantic_mask'].shape == torch.Size([500])
+        assert results[1]['semantic_mask'].shape == torch.Size([200])
+
+    # test aug_test
+    with torch.no_grad():
+        scene_points = [
+            torch.randn(2, 500, 6).float().cuda() * 3.0,
+            torch.randn(2, 200, 6).float().cuda() * 2.5
+        ]
+        img_metas = [[dict(), dict()], [dict(), dict()]]
+        results = self.aug_test(scene_points, img_metas)
+        assert results[0]['semantic_mask'].shape == torch.Size([500])
+        assert results[1]['semantic_mask'].shape == torch.Size([200])
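A hedged sketch mirroring the new DGCNN segmentor test for quick inference, assuming an mmdetection3d checkout with the repo configs on disk and a GPU (the 'configs/' prefix is an assumption; the config name is from this diff):

import torch
from mmcv import Config
from mmdet3d.models.builder import build_segmentor

cfg = Config.fromfile(
    'configs/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class.py')
cfg.model.test_cfg.num_points = 32  # shrink sliding-window size, as the test does
model = build_segmentor(cfg.model).cuda().eval()

with torch.no_grad():
    results = model.simple_test([torch.randn(500, 6).cuda()], [dict()])
print(results[0]['semantic_mask'].shape)  # torch.Size([500])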
tests/test_runtime/test_apis.py  (+4, -3)

 # Copyright (c) OpenMMLab. All rights reserved.
-import numpy as np
 import os
-import pytest
 import tempfile
+from os.path import dirname, exists, join
+
+import numpy as np
+import pytest
 import torch
 from mmcv.parallel import MMDataParallel
-from os.path import dirname, exists, join
 
 from mmdet3d.apis import (convert_SyncBN, inference_detector,
                           inference_mono_3d_detector,
tests/test_runtime/test_config.py  (+27, -0)

@@ -61,6 +61,8 @@ def test_config_build_model():
                 check_parta2_roi_head(head_config, detector.roi_head)
             elif head_config.type == 'H3DRoIHead':
                 check_h3d_roi_head(head_config, detector.roi_head)
+            elif head_config.type == 'PointRCNNRoIHead':
+                check_pointrcnn_roi_head(head_config, detector.roi_head)
             else:
                 _check_roi_head(head_config, detector.roi_head)
         # else:

@@ -273,3 +275,28 @@ def _check_h3d_bbox_head(bbox_cfg, bbox_head):
         12 == bbox_head.line_center_matcher.num_point[0]
     assert bbox_cfg.suface_matching_cfg.mlp_channels[-1] * \
         18 == bbox_head.bbox_pred[0].in_channels
+
+
+def check_pointrcnn_roi_head(config, head):
+    assert config['type'] == head.__class__.__name__
+
+    # check point_roi_extractor
+    point_roi_cfg = config.point_roi_extractor
+    point_roi_extractor = head.point_roi_extractor
+    _check_pointrcnn_roi_extractor(point_roi_cfg, point_roi_extractor)
+    # check pointrcnn rcnn bbox head
+    bbox_cfg = config.bbox_head
+    bbox_head = head.bbox_head
+    _check_pointrcnn_bbox_head(bbox_cfg, bbox_head)
+
+
+def _check_pointrcnn_roi_extractor(config, roi_extractor):
+    assert config['type'] == roi_extractor.__class__.__name__
+    assert config.roi_layer.num_sampled_points == \
+        roi_extractor.roi_layer.num_sampled_points
+
+
+def _check_pointrcnn_bbox_head(bbox_cfg, bbox_head):
+    assert bbox_cfg['type'] == bbox_head.__class__.__name__
+    assert bbox_cfg.num_classes == bbox_head.num_classes
+    assert bbox_cfg.with_corner_loss == bbox_head.with_corner_loss
tests/test_utils/test_anchors.py  (+22, -22)

@@ -22,7 +22,7 @@ def test_anchor_3d_range_generator():
             [0, -39.68, -0.6, 70.4, 39.68, -0.6],
             [0, -39.68, -1.78, 70.4, 39.68, -1.78],
         ],
-        sizes=[[0.6, 0.8, 1.73], [0.6, 1.76, 1.73], [1.6, 3.9, 1.56]],
+        sizes=[[0.8, 0.6, 1.73], [1.76, 0.6, 1.73], [3.9, 1.6, 1.56]],
         rotations=[0, 1.57],
        reshape_out=False)

@@ -32,8 +32,8 @@ def test_anchor_3d_range_generator():
         '[[0, -39.68, -0.6, 70.4, 39.68, -0.6], ' \
         '[0, -39.68, -0.6, 70.4, 39.68, -0.6], ' \
         '[0, -39.68, -1.78, 70.4, 39.68, -1.78]],' \
-        '\nscales=[1],\nsizes=[[0.6, 0.8, 1.73], ' \
-        '[0.6, 1.76, 1.73], [1.6, 3.9, 1.56]],' \
+        '\nscales=[1],\nsizes=[[0.8, 0.6, 1.73], ' \
+        '[1.76, 0.6, 1.73], [3.9, 1.6, 1.56]],' \
         '\nrotations=[0, 1.57],\nreshape_out=False,' \
         '\nsize_per_range=True)'
     assert repr_str == expected_repr_str

@@ -54,8 +54,8 @@ def test_aligned_anchor_generator():
         ranges=[[-51.2, -51.2, -1.80, 51.2, 51.2, -1.80]],
         scales=[1, 2, 4],
         sizes=[
-            [0.8660, 2.5981, 1.],  # 1.5/sqrt(3)
-            [0.5774, 1.7321, 1.],  # 1/sqrt(3)
+            [2.5981, 0.8660, 1.],  # 1.5/sqrt(3)
+            [1.7321, 0.5774, 1.],  # 1/sqrt(3)
             [1., 1., 1.],
             [0.4, 0.4, 1],
         ],

@@ -71,7 +71,7 @@ def test_aligned_anchor_generator():
     # check base anchors
     expected_grid_anchors = [
         torch.tensor([[
-            -51.0000, -51.0000, -1.8000, 0.8660, 2.5981, 1.0000, 0.0000,
+            -51.0000, -51.0000, -1.8000, 2.5981, 0.8660, 1.0000, 0.0000,
             0.0000, 0.0000
         ],
                       [

@@ -91,20 +91,20 @@ def test_aligned_anchor_generator():
             0.0000, 0.0000, 0.0000
         ],
         [
-            -49.4000, -51.0000, -1.8000, 0.5774, 1.7321, 1.0000,
+            -49.4000, -51.0000, -1.8000, 1.7321, 0.5774, 1.0000,
             1.5700, 0.0000, 0.0000
        ],
        [
-            -49.0000, -51.0000, -1.8000, 0.5774, 1.7321, 1.0000,
+            -49.0000, -51.0000, -1.8000, 1.7321, 0.5774, 1.0000,
             0.0000, 0.0000, 0.0000
        ],
        [
-            -48.6000, -51.0000, -1.8000, 0.8660, 2.5981, 1.0000,
+            -48.6000, -51.0000, -1.8000, 2.5981, 0.8660, 1.0000,
             1.5700, 0.0000, 0.0000
        ]], device=device),
         torch.tensor([[
-            -50.8000, -50.8000, -1.8000, 1.7320, 5.1962, 2.0000, 0.0000,
+            -50.8000, -50.8000, -1.8000, 5.1962, 1.7320, 2.0000, 0.0000,
             0.0000, 0.0000
         ],
                       [

@@ -124,20 +124,20 @@ def test_aligned_anchor_generator():
             0.0000, 0.0000, 0.0000
        ],
        [
-            -47.6000, -50.8000, -1.8000, 1.1548, 3.4642, 2.0000,
+            -47.6000, -50.8000, -1.8000, 3.4642, 1.1548, 2.0000,
             1.5700, 0.0000, 0.0000
        ],
        [
-            -46.8000, -50.8000, -1.8000, 1.1548, 3.4642, 2.0000,
+            -46.8000, -50.8000, -1.8000, 3.4642, 1.1548, 2.0000,
             0.0000, 0.0000, 0.0000
        ],
        [
-            -46.0000, -50.8000, -1.8000, 1.7320, 5.1962, 2.0000,
+            -46.0000, -50.8000, -1.8000, 5.1962, 1.7320, 2.0000,
             1.5700, 0.0000, 0.0000
        ]], device=device),
         torch.tensor([[
-            -50.4000, -50.4000, -1.8000, 3.4640, 10.3924, 4.0000, 0.0000,
+            -50.4000, -50.4000, -1.8000, 10.3924, 3.4640, 4.0000, 0.0000,
             0.0000, 0.0000
         ],
                       [

@@ -157,15 +157,15 @@ def test_aligned_anchor_generator():
             0.0000, 0.0000, 0.0000
        ],
        [
-            -44.0000, -50.4000, -1.8000, 2.3096, 6.9284, 4.0000,
+            -44.0000, -50.4000, -1.8000, 6.9284, 2.3096, 4.0000,
             1.5700, 0.0000, 0.0000
        ],
        [
-            -42.4000, -50.4000, -1.8000, 2.3096, 6.9284, 4.0000,
+            -42.4000, -50.4000, -1.8000, 6.9284, 2.3096, 4.0000,
             0.0000, 0.0000, 0.0000
        ],
        [
-            -40.8000, -50.4000, -1.8000, 3.4640, 10.3924, 4.0000,
+            -40.8000, -50.4000, -1.8000, 10.3924, 3.4640, 4.0000,
             1.5700, 0.0000, 0.0000
        ]], device=device)

@@ -194,7 +194,7 @@ def test_aligned_anchor_generator_per_cls():
         type='AlignedAnchor3DRangeGeneratorPerCls',
         ranges=[[-100, -100, -1.80, 100, 100, -1.80],
                 [-100, -100, -1.30, 100, 100, -1.30]],
-        sizes=[[0.63, 1.76, 1.44], [0.96, 2.35, 1.59]],
+        sizes=[[1.76, 0.63, 1.44], [2.35, 0.96, 1.59]],
         custom_values=[0, 0],
         rotations=[0, 1.57],
         reshape_out=False)

@@ -205,20 +205,20 @@ def test_aligned_anchor_generator_per_cls():
     # check base anchors
     expected_grid_anchors = [[
         torch.tensor([[
-            -99.0000, -99.0000, -1.8000, 0.6300, 1.7600, 1.4400, 0.0000,
+            -99.0000, -99.0000, -1.8000, 1.7600, 0.6300, 1.4400, 0.0000,
             0.0000, 0.0000
         ],
                       [
-                          -99.0000, -99.0000, -1.8000, 0.6300, 1.7600, 1.4400,
+                          -99.0000, -99.0000, -1.8000, 1.7600, 0.6300, 1.4400,
                           1.5700, 0.0000, 0.0000
                       ]],
                      device=device),
         torch.tensor([[
-            -98.0000, -98.0000, -1.3000, 0.9600, 2.3500, 1.5900, 0.0000,
+            -98.0000, -98.0000, -1.3000, 2.3500, 0.9600, 1.5900, 0.0000,
             0.0000, 0.0000
         ],
                       [
-                          -98.0000, -98.0000, -1.3000, 0.9600, 2.3500, 1.5900,
+                          -98.0000, -98.0000, -1.3000, 2.3500, 0.9600, 1.5900,
                           1.5700, 0.0000, 0.0000
                       ]],
                      device=device)
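Every size change in this file swaps the first two entries, which reads (hedged) as anchor sizes moving from (width, length, height) to (length, width, height) ordering in the v1.0 coordinate refactor. A one-liner capturing the mapping against the KITTI car prior used above:

old_size = [1.6, 3.9, 1.56]                          # (w, l, h) before this commit
new_size = [old_size[1], old_size[0], old_size[2]]   # -> (l, w, h)
assert new_size == [3.9, 1.6, 1.56]                  # matches the updated test values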
tests/test_utils/test_bbox_coders.py
View file @
32a4328b
# Copyright (c) OpenMMLab. All rights reserved.
import
numpy
as
np
import
torch
from
mmcv.cnn
import
Scale
from
torch
import
nn
as
nn
from
mmdet3d.core.bbox
import
DepthInstance3DBoxes
,
LiDARInstance3DBoxes
from
mmdet3d.core.bbox
import
(
CameraInstance3DBoxes
,
DepthInstance3DBoxes
,
LiDARInstance3DBoxes
)
from
mmdet.core
import
build_bbox_coder
...
...
@@ -352,3 +356,310 @@ def test_centerpoint_bbox_coder():
assert
temp
[
i
][
'bboxes'
].
shape
==
torch
.
Size
([
500
,
9
])
assert
temp
[
i
][
'scores'
].
shape
==
torch
.
Size
([
500
])
assert
temp
[
i
][
'labels'
].
shape
==
torch
.
Size
([
500
])
def
test_point_xyzwhlr_bbox_coder
():
bbox_coder_cfg
=
dict
(
type
=
'PointXYZWHLRBBoxCoder'
,
use_mean_size
=
True
,
mean_size
=
[[
3.9
,
1.6
,
1.56
],
[
0.8
,
0.6
,
1.73
],
[
1.76
,
0.6
,
1.73
]])
boxcoder
=
build_bbox_coder
(
bbox_coder_cfg
)
# test encode
gt_bboxes_3d
=
torch
.
tensor
(
[[
13.3329
,
2.3514
,
-
0.7004
,
1.7508
,
0.4702
,
1.7909
,
-
3.0522
],
[
2.2068
,
-
2.6994
,
-
0.3277
,
3.8703
,
1.6602
,
1.6913
,
-
1.9057
],
[
5.5269
,
2.5085
,
-
1.0129
,
1.1496
,
0.8006
,
1.8887
,
2.1756
]])
points
=
torch
.
tensor
([[
13.70
,
2.40
,
0.12
],
[
3.20
,
-
3.00
,
0.2
],
[
5.70
,
2.20
,
-
0.4
]])
gt_labels_3d
=
torch
.
tensor
([
2
,
0
,
1
])
bbox_target
=
boxcoder
.
encode
(
gt_bboxes_3d
,
points
,
gt_labels_3d
)
expected_bbox_target
=
torch
.
tensor
([[
-
0.1974
,
-
0.0261
,
-
0.4742
,
-
0.0052
,
-
0.2438
,
0.0346
,
-
0.9960
,
-
0.0893
],
[
-
0.2356
,
0.0713
,
-
0.3383
,
-
0.0076
,
0.0369
,
0.0808
,
-
0.3287
,
-
0.9444
],
[
-
0.1731
,
0.3085
,
-
0.3543
,
0.3626
,
0.2884
,
0.0878
,
-
0.5686
,
0.8226
]])
assert
torch
.
allclose
(
expected_bbox_target
,
bbox_target
,
atol
=
1e-4
)
# test decode
bbox3d_out
=
boxcoder
.
decode
(
bbox_target
,
points
,
gt_labels_3d
)
assert
torch
.
allclose
(
bbox3d_out
,
gt_bboxes_3d
,
atol
=
1e-4
)
def
test_fcos3d_bbox_coder
():
# test a config without priors
bbox_coder_cfg
=
dict
(
type
=
'FCOS3DBBoxCoder'
,
base_depths
=
None
,
base_dims
=
None
,
code_size
=
7
,
norm_on_bbox
=
True
)
bbox_coder
=
build_bbox_coder
(
bbox_coder_cfg
)
# test decode
# [2, 7, 1, 1]
batch_bbox
=
torch
.
tensor
([[[[
0.3130
]],
[[
0.7094
]],
[[
0.8743
]],
[[
0.0570
]],
[[
0.5579
]],
[[
0.1593
]],
[[
0.4553
]]],
[[[
0.7758
]],
[[
0.2298
]],
[[
0.3925
]],
[[
0.6307
]],
[[
0.4377
]],
[[
0.3339
]],
[[
0.1966
]]]])
batch_scale
=
nn
.
ModuleList
([
Scale
(
1.0
)
for
_
in
range
(
3
)])
stride
=
2
training
=
False
cls_score
=
torch
.
randn
([
2
,
2
,
1
,
1
]).
sigmoid
()
decode_bbox
=
bbox_coder
.
decode
(
batch_bbox
,
batch_scale
,
stride
,
training
,
cls_score
)
expected_bbox
=
torch
.
tensor
([[[[
0.6261
]],
[[
1.4188
]],
[[
2.3971
]],
[[
1.0586
]],
[[
1.7470
]],
[[
1.1727
]],
[[
0.4553
]]],
[[[
1.5516
]],
[[
0.4596
]],
[[
1.4806
]],
[[
1.8790
]],
[[
1.5492
]],
[[
1.3965
]],
[[
0.1966
]]]])
assert
torch
.
allclose
(
decode_bbox
,
expected_bbox
,
atol
=
1e-3
)
# test a config with priors
prior_bbox_coder_cfg
=
dict
(
type
=
'FCOS3DBBoxCoder'
,
base_depths
=
((
28.
,
13.
),
(
25.
,
12.
)),
base_dims
=
((
2.
,
3.
,
1.
),
(
1.
,
2.
,
3.
)),
code_size
=
7
,
norm_on_bbox
=
True
)
prior_bbox_coder
=
build_bbox_coder
(
prior_bbox_coder_cfg
)
# test decode
batch_bbox
=
torch
.
tensor
([[[[
0.3130
]],
[[
0.7094
]],
[[
0.8743
]],
[[
0.0570
]],
[[
0.5579
]],
[[
0.1593
]],
[[
0.4553
]]],
[[[
0.7758
]],
[[
0.2298
]],
[[
0.3925
]],
[[
0.6307
]],
[[
0.4377
]],
[[
0.3339
]],
[[
0.1966
]]]])
batch_scale
=
nn
.
ModuleList
([
Scale
(
1.0
)
for
_
in
range
(
3
)])
stride
=
2
training
=
False
cls_score
=
torch
.
tensor
([[[[
0.5811
]],
[[
0.6198
]]],
[[[
0.4889
]],
[[
0.8142
]]]])
decode_bbox
=
prior_bbox_coder
.
decode
(
batch_bbox
,
batch_scale
,
stride
,
training
,
cls_score
)
expected_bbox
=
torch
.
tensor
([[[[
0.6260
]],
[[
1.4188
]],
[[
35.4916
]],
[[
1.0587
]],
[[
3.4940
]],
[[
3.5181
]],
[[
0.4553
]]],
[[[
1.5516
]],
[[
0.4596
]],
[[
29.7100
]],
[[
1.8789
]],
[[
3.0983
]],
[[
4.1892
]],
[[
0.1966
]]]])
assert
torch
.
allclose
(
decode_bbox
,
expected_bbox
,
atol
=
1e-3
)
# test decode_yaw
decode_bbox
=
decode_bbox
.
permute
(
0
,
2
,
3
,
1
).
view
(
-
1
,
7
)
batch_centers2d
=
torch
.
tensor
([[
100.
,
150.
],
[
200.
,
100.
]])
batch_dir_cls
=
torch
.
tensor
([
0.
,
1.
])
dir_offset
=
0.7854
cam2img
=
torch
.
tensor
([[
700.
,
0.
,
450.
,
0.
],
[
0.
,
700.
,
200.
,
0.
],
[
0.
,
0.
,
1.
,
0.
],
[
0.
,
0.
,
0.
,
1.
]])
decode_bbox
=
prior_bbox_coder
.
decode_yaw
(
decode_bbox
,
batch_centers2d
,
batch_dir_cls
,
dir_offset
,
cam2img
)
expected_bbox
=
torch
.
tensor
(
[[
0.6260
,
1.4188
,
35.4916
,
1.0587
,
3.4940
,
3.5181
,
3.1332
],
[
1.5516
,
0.4596
,
29.7100
,
1.8789
,
3.0983
,
4.1892
,
6.1368
]])
assert
torch
.
allclose
(
decode_bbox
,
expected_bbox
,
atol
=
1e-3
)


def test_pgd_bbox_coder():
    # test a config without priors
    bbox_coder_cfg = dict(
        type='PGDBBoxCoder',
        base_depths=None,
        base_dims=None,
        code_size=7,
        norm_on_bbox=True)
    bbox_coder = build_bbox_coder(bbox_coder_cfg)

    # test decode_2d
    # [2, 27, 1, 1]
    batch_bbox = torch.tensor([[[[0.0103]], [[0.7394]], [[0.3296]],
                                [[0.4708]], [[0.1439]], [[0.0778]],
                                [[0.9399]], [[0.8366]], [[0.1264]],
                                [[0.3030]], [[0.1898]], [[0.0714]],
                                [[0.4144]], [[0.4341]], [[0.6442]],
                                [[0.2951]], [[0.2890]], [[0.4486]],
                                [[0.2848]], [[0.1071]], [[0.9530]],
                                [[0.9460]], [[0.3822]], [[0.9320]],
                                [[0.2611]], [[0.5580]], [[0.0397]]],
                               [[[0.8612]], [[0.1680]], [[0.5167]],
                                [[0.8502]], [[0.0377]], [[0.3615]],
                                [[0.9550]], [[0.5219]], [[0.1402]],
                                [[0.6843]], [[0.2121]], [[0.9468]],
                                [[0.6238]], [[0.7918]], [[0.1646]],
                                [[0.0500]], [[0.6290]], [[0.3956]],
                                [[0.2901]], [[0.4612]], [[0.7333]],
                                [[0.1194]], [[0.6999]], [[0.3980]],
                                [[0.3262]], [[0.7185]], [[0.4474]]]])
    batch_scale = nn.ModuleList([Scale(1.0) for _ in range(5)])
    stride = 2
    training = False
    cls_score = torch.randn([2, 2, 1, 1]).sigmoid()
    decode_bbox = bbox_coder.decode(batch_bbox, batch_scale, stride, training,
                                    cls_score)
    max_regress_range = 16
    pred_keypoints = True
    pred_bbox2d = True
    decode_bbox_w2d = bbox_coder.decode_2d(decode_bbox, batch_scale, stride,
                                           max_regress_range, training,
                                           pred_keypoints, pred_bbox2d)
    expected_decode_bbox_w2d = torch.tensor(
        [[[[0.0206]], [[1.4788]], [[1.3904]], [[1.6013]], [[1.1548]],
          [[1.0809]], [[0.9399]], [[10.9441]], [[2.0117]], [[4.7049]],
          [[3.0009]], [[1.1405]], [[6.2752]], [[6.5399]], [[9.0840]],
          [[4.5892]], [[4.4994]], [[6.7320]], [[4.4375]], [[1.7071]],
          [[11.8582]], [[11.8075]], [[5.8339]], [[1.8640]], [[0.5222]],
          [[1.1160]], [[0.0794]]],
         [[[1.7224]], [[0.3360]], [[1.6765]], [[2.3401]], [[1.0384]],
          [[1.4355]], [[0.9550]], [[7.6666]], [[2.2286]], [[9.5089]],
          [[3.3436]], [[11.8133]], [[8.8603]], [[10.5508]], [[2.6101]],
          [[0.7993]], [[8.9178]], [[6.0188]], [[4.5156]], [[6.8970]],
          [[10.0013]], [[1.9014]], [[9.6689]], [[0.7960]], [[0.6524]],
          [[1.4370]], [[0.8948]]]])
    assert torch.allclose(expected_decode_bbox_w2d, decode_bbox_w2d, atol=1e-3)
    # test decode_prob_depth
    # [10, 8]
    depth_cls_preds = torch.tensor([
        [-0.4383, 0.7207, -0.4092, 0.4649, 0.8526, 0.6186, -1.4312, -0.7150],
        [0.0621, 0.2369, 0.5170, 0.8484, -0.1099, 0.1829, -0.0072, 1.0618],
        [-1.6114, -0.1057, 0.5721, -0.5986, -2.0471, 0.8140, -0.8385, -0.4822],
        [0.0742, -0.3261, 0.4607, 1.8155, -0.3571, -0.0234, 0.3787, 2.3251],
        [1.0492, -0.6881, -0.0136, -1.8291, 0.8460, -1.0171, 2.5691, -0.8114],
        [0.0968, -0.5601, 1.0458, 0.2560, 1.3018, 0.1635, 0.0680, -1.0263],
        [-0.0765, 0.1498, -2.7321, 1.0047, -0.2505, 0.0871, -0.4820, -0.3003],
        [-0.4123, 0.2298, -0.1330, -0.6008, 0.6526, 0.7118, 0.9728, -0.7793],
        [1.6940, 0.3355, 1.4661, 0.5477, 0.8667, 0.0527, -0.9975, -0.0689],
        [0.4724, -0.3632, -0.0654, 0.4034, -0.3494, -0.7548, 0.7297, 1.2754]
    ])
    depth_range = (0, 70)
    depth_unit = 10
    num_depth_cls = 8
    uniform_prob_depth_preds = bbox_coder.decode_prob_depth(
        depth_cls_preds, depth_range, depth_unit, 'uniform', num_depth_cls)
    expected_preds = torch.tensor([
        32.0441, 38.4689, 36.1831, 48.2096, 46.1560, 32.7973, 33.2155,
        39.9822, 21.9905, 43.0161
    ])
    assert torch.allclose(uniform_prob_depth_preds, expected_preds, atol=1e-3)

    linear_prob_depth_preds = bbox_coder.decode_prob_depth(
        depth_cls_preds, depth_range, depth_unit, 'linear', num_depth_cls)
    expected_preds = torch.tensor([
        21.1431, 30.2421, 25.8964, 41.6116, 38.6234, 21.4582, 23.2993,
        30.1111, 13.9273, 36.8419
    ])
    assert torch.allclose(linear_prob_depth_preds, expected_preds, atol=1e-3)

    log_prob_depth_preds = bbox_coder.decode_prob_depth(
        depth_cls_preds, depth_range, depth_unit, 'log', num_depth_cls)
    expected_preds = torch.tensor([
        12.6458, 24.2487, 17.4015, 36.9375, 27.5982, 12.5510, 15.6635,
        19.8408, 9.1605, 31.3765
    ])
    assert torch.allclose(log_prob_depth_preds, expected_preds, atol=1e-3)

    loguniform_prob_depth_preds = bbox_coder.decode_prob_depth(
        depth_cls_preds, depth_range, depth_unit, 'loguniform', num_depth_cls)
    expected_preds = torch.tensor([
        6.9925, 10.3273, 8.9895, 18.6524, 16.4667, 7.3196, 7.5078, 11.3207,
        3.7987, 13.6095
    ])
    assert torch.allclose(loguniform_prob_depth_preds, expected_preds,
                          atol=1e-3)
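

# Editorial sketch (not part of the original tests): the 'uniform'
# expectations above match a softmax-weighted mean over evenly spaced bin
# depths; the other division modes appear to differ only in how the bin
# centers are spaced. A minimal standalone reproduction, assuming that
# behaviour:
def _uniform_prob_depth_sketch(depth_cls_preds, depth_range, depth_unit,
                               num_depth_cls):
    import torch.nn.functional as F

    # evenly spaced bin centers: 0, 10, ..., 70 for the setup above
    centers = depth_cls_preds.new_tensor(
        [depth_range[0] + i * depth_unit for i in range(num_depth_cls)])
    # expectation of the bin depth under the softmax distribution
    return (F.softmax(depth_cls_preds, dim=-1) * centers).sum(dim=-1)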


def test_smoke_bbox_coder():
    bbox_coder_cfg = dict(
        type='SMOKECoder',
        base_depth=(28.01, 16.32),
        base_dims=((3.88, 1.63, 1.53), (1.78, 1.70, 0.58),
                   (0.88, 1.73, 0.67)),
        code_size=7)
    bbox_coder = build_bbox_coder(bbox_coder_cfg)
    regression = torch.rand([200, 8])
    points = torch.rand([200, 2])
    labels = torch.ones([2, 100])
    cam2imgs = torch.rand([2, 4, 4])
    trans_mats = torch.rand([2, 3, 3])

    img_metas = [dict(box_type_3d=CameraInstance3DBoxes) for i in range(2)]
    locations, dimensions, orientations = bbox_coder.decode(
        regression, points, labels, cam2imgs, trans_mats)
    assert locations.shape == torch.Size([200, 3])
    assert dimensions.shape == torch.Size([200, 3])
    assert orientations.shape == torch.Size([200, 1])
    bboxes = bbox_coder.encode(locations, dimensions, orientations, img_metas)
    assert bboxes.tensor.shape == torch.Size([200, 7])

    # specifically designed to test the orientation decode function's
    # special cases
    ori_vector = torch.tensor([[-0.9, -0.01], [-0.9, 0.01]])
    locations = torch.tensor([[15., 2., 1.], [15., 2., -1.]])
    orientations = bbox_coder._decode_orientation(ori_vector, locations)
    assert orientations.shape == torch.Size([2, 1])


def test_monoflex_bbox_coder():
    bbox_coder_cfg = dict(
        type='MonoFlexCoder',
        depth_mode='exp',
        base_depth=(26.494627, 16.05988),
        depth_range=[0.1, 100],
        combine_depth=True,
        uncertainty_range=[-10, 10],
        base_dims=((3.8840, 1.5261, 1.6286, 0.4259, 0.1367, 0.1022),
                   (0.8423, 1.7607, 0.6602, 0.2349, 0.1133, 0.1427),
                   (1.7635, 1.7372, 0.5968, 0.1766, 0.0948, 0.1242)),
        dims_mode='linear',
        multibin=True,
        num_dir_bins=4,
        bin_centers=[0, np.pi / 2, np.pi, -np.pi / 2],
        bin_margin=np.pi / 6,
        code_size=7)
    bbox_coder = build_bbox_coder(bbox_coder_cfg)
    gt_bboxes_3d = CameraInstance3DBoxes(torch.rand([6, 7]))
    orientation_target = bbox_coder.encode(gt_bboxes_3d)
    assert orientation_target.shape == torch.Size([6, 8])

    regression = torch.rand([100, 50])
    base_centers2d = torch.rand([100, 2])
    labels = torch.ones([100])
    downsample_ratio = 4
    cam2imgs = torch.rand([100, 4, 4])

    preds = bbox_coder.decode(regression, base_centers2d, labels,
                              downsample_ratio, cam2imgs)
    assert preds['bboxes2d'].shape == torch.Size([100, 4])
    assert preds['dimensions'].shape == torch.Size([100, 3])
    assert preds['offsets2d'].shape == torch.Size([100, 2])
    assert preds['keypoints2d'].shape == torch.Size([100, 10, 2])
    assert preds['orientations'].shape == torch.Size([100, 16])
    assert preds['direct_depth'].shape == torch.Size([100, ])
    assert preds['keypoints_depth'].shape == torch.Size([100, 3])
    assert preds['combined_depth'].shape == torch.Size([100, ])
    assert preds['direct_depth_uncertainty'].shape == torch.Size([100, ])
    assert preds['keypoints_depth_uncertainty'].shape == torch.Size([100, 3])

    offsets_2d = torch.randn([100, 2])
    depths = torch.randn([100, ])
    locations = bbox_coder.decode_location(base_centers2d, offsets_2d, depths,
                                           cam2imgs, downsample_ratio)
    assert locations.shape == torch.Size([100, 3])

    orientations = torch.randn([100, 16])
    yaws, local_yaws = bbox_coder.decode_orientation(orientations, locations)
    assert yaws.shape == torch.Size([100, ])
    assert local_yaws.shape == torch.Size([100, ])
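
# NOTE (editorial, not part of the original tests): the shapes checked above
# are consistent with MonoFlex's multibin orientation encoding with
# num_dir_bins=4: the [6, 8] encode target appears to hold 4
# bin-classification targets plus 4 bin residuals per box, and the [100, 16]
# 'orientations' prediction appears to hold, per bin, 2 classification logits
# and a (sin, cos) offset pair.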
tests/test_utils/test_box3d.py
View file @ 32a4328b
# Copyright (c) OpenMMLab. All rights reserved.
import unittest

import numpy as np
import pytest
import torch

from mmdet3d.core.bbox import (BaseInstance3DBoxes, Box3DMode,
-                              CameraInstance3DBoxes, DepthInstance3DBoxes,
-                              LiDARInstance3DBoxes, bbox3d2roi,
-                              bbox3d_mapping_back)
+                              CameraInstance3DBoxes, Coord3DMode,
+                              DepthInstance3DBoxes, LiDARInstance3DBoxes,
+                              bbox3d2roi, bbox3d_mapping_back)
from mmdet3d.core.bbox.structures.utils import (get_box_type, limit_period,
                                                points_cam2img,
                                                rotation_3d_in_axis,
@@ -140,10 +141,15 @@ def test_lidar_boxes3d():
     assert torch.allclose(expected_tensor, bottom_center_box.tensor)

     # Test init with numpy array
-    np_boxes = np.array(
-        [[1.7802081, 2.516249, -1.7501148, 1.75, 3.39, 1.65, 1.48],
-         [8.959413, 2.4567227, -1.6357126, 1.54, 4.01, 1.57, 1.62]],
-        dtype=np.float32)
+    np_boxes = np.array(
+        [[1.7802081, 2.516249, -1.7501148, 1.75, 3.39, 1.65,
+          1.48 - 0.13603681398218053 * 4],
+         [8.959413, 2.4567227, -1.6357126, 1.54, 4.01, 1.57,
+          1.62 - 0.13603681398218053 * 4]],
+        dtype=np.float32)
     boxes_1 = LiDARInstance3DBoxes(np_boxes)
     assert torch.allclose(boxes_1.tensor, torch.from_numpy(np_boxes))
@@ -157,15 +163,15 @@ def test_lidar_boxes3d():
     th_boxes = torch.tensor(
         [[28.29669987, -0.5557558, -1.30332506, 1.47000003, 2.23000002,
-          1.48000002, -1.57000005],
+          1.48000002, -1.57000005 - 0.13603681398218053 * 4],
          [26.66901946, 21.82302134, -1.73605708, 1.55999994, 3.48000002,
-          1.39999998, -1.69000006],
+          1.39999998, -1.69000006 - 0.13603681398218053 * 4],
          [31.31977974, 8.16214412, -1.62177875, 1.74000001, 3.76999998,
-          1.48000002, 2.78999996]],
+          1.48000002, 2.78999996 - 0.13603681398218053 * 4]],
         dtype=torch.float32)
     boxes_2 = LiDARInstance3DBoxes(th_boxes)
@@ -176,12 +182,30 @@ def test_lidar_boxes3d():
     boxes_1 = boxes_1.to(boxes_2.device)

     # test box concatenation
-    expected_tensor = torch.tensor(
-        [[1.7802081, 2.516249, -1.7501148, 1.75, 3.39, 1.65, 1.48],
-         [8.959413, 2.4567227, -1.6357126, 1.54, 4.01, 1.57, 1.62],
-         [28.2967, -0.5557558, -1.303325, 1.47, 2.23, 1.48, -1.57],
-         [26.66902, 21.82302, -1.736057, 1.56, 3.48, 1.4, -1.69],
-         [31.31978, 8.162144, -1.6217787, 1.74, 3.77, 1.48, 2.79]])
+    expected_tensor = torch.tensor(
+        [[1.7802081, 2.516249, -1.7501148, 1.75, 3.39, 1.65,
+          1.48 - 0.13603681398218053 * 4],
+         [8.959413, 2.4567227, -1.6357126, 1.54, 4.01, 1.57,
+          1.62 - 0.13603681398218053 * 4],
+         [28.2967, -0.5557558, -1.303325, 1.47, 2.23, 1.48,
+          -1.57 - 0.13603681398218053 * 4],
+         [26.66902, 21.82302, -1.736057, 1.56, 3.48, 1.4,
+          -1.69 - 0.13603681398218053 * 4],
+         [31.31978, 8.162144, -1.6217787, 1.74, 3.77, 1.48,
+          2.79 - 0.13603681398218053 * 4]])
     boxes = LiDARInstance3DBoxes.cat([boxes_1, boxes_2])
     assert torch.allclose(boxes.tensor, expected_tensor)
     # concatenate empty list
@@ -196,11 +220,26 @@ def test_lidar_boxes3d():
                            [0.6533, -0.5520, -0.5265],
                            [4.5870, 0.5358, -1.4741]])
-    expected_tensor = torch.tensor(
-        [[1.7802081, -2.516249, -1.7501148, 1.75, 3.39, 1.65, 1.6615927],
-         [8.959413, -2.4567227, -1.6357126, 1.54, 4.01, 1.57, 1.5215927],
-         [28.2967, 0.5557558, -1.303325, 1.47, 2.23, 1.48, 4.7115927],
-         [26.66902, -21.82302, -1.736057, 1.56, 3.48, 1.4, 4.8315926],
-         [31.31978, -8.162144, -1.6217787, 1.74, 3.77, 1.48, 0.35159278]])
+    expected_tensor = torch.tensor(
+        [[1.7802081, -2.516249, -1.7501148, 1.75, 3.39, 1.65,
+          1.6615927 - np.pi + 0.13603681398218053 * 4],
+         [8.959413, -2.4567227, -1.6357126, 1.54, 4.01, 1.57,
+          1.5215927 - np.pi + 0.13603681398218053 * 4],
+         [28.2967, 0.5557558, -1.303325, 1.47, 2.23, 1.48,
+          4.7115927 - np.pi + 0.13603681398218053 * 4],
+         [26.66902, -21.82302, -1.736057, 1.56, 3.48, 1.4,
+          4.8315926 - np.pi + 0.13603681398218053 * 4],
+         [31.31978, -8.162144, -1.6217787, 1.74, 3.77, 1.48,
+          0.35159278 - np.pi + 0.13603681398218053 * 4]])
     expected_points = torch.tensor([[1.2559, 0.6762, -1.4658],
                                     [4.7814, 0.8784, -1.3857],
                                     [6.7053, -0.2517, -0.9697],
@@ -211,11 +250,26 @@ def test_lidar_boxes3d():
     assert torch.allclose(points, expected_points, 1e-3)

-    expected_tensor = torch.tensor(
-        [[-1.7802, -2.5162, -1.7501, 1.7500, 3.3900, 1.6500, -1.6616],
-         [-8.9594, -2.4567, -1.6357, 1.5400, 4.0100, 1.5700, -1.5216],
-         [-28.2967, 0.5558, -1.3033, 1.4700, 2.2300, 1.4800, -4.7116],
-         [-26.6690, -21.8230, -1.7361, 1.5600, 3.4800, 1.4000, -4.8316],
-         [-31.3198, -8.1621, -1.6218, 1.7400, 3.7700, 1.4800, -0.3516]])
+    expected_tensor = torch.tensor(
+        [[-1.7802, -2.5162, -1.7501, 1.7500, 3.3900, 1.6500,
+          -1.6616 + np.pi * 2 - 0.13603681398218053 * 4],
+         [-8.9594, -2.4567, -1.6357, 1.5400, 4.0100, 1.5700,
+          -1.5216 + np.pi * 2 - 0.13603681398218053 * 4],
+         [-28.2967, 0.5558, -1.3033, 1.4700, 2.2300, 1.4800,
+          -4.7116 + np.pi * 2 - 0.13603681398218053 * 4],
+         [-26.6690, -21.8230, -1.7361, 1.5600, 3.4800, 1.4000,
+          -4.8316 + np.pi * 2 - 0.13603681398218053 * 4],
+         [-31.3198, -8.1621, -1.6218, 1.7400, 3.7700, 1.4800,
+          -0.3516 + np.pi * 2 - 0.13603681398218053 * 4]])
     boxes_flip_vert = boxes.clone()
     points = boxes_flip_vert.flip('vertical', points)
     expected_points = torch.tensor([[-1.2559, 0.6762, -1.4658],
@@ -229,12 +283,27 @@ def test_lidar_boxes3d():
     # test box rotation
     # with input torch.Tensor points and angle
-    expected_tensor = torch.tensor(
-        [[1.4225, -2.7344, -1.7501, 1.7500, 3.3900, 1.6500, 1.7976],
-         [8.5435, -3.6491, -1.6357, 1.5400, 4.0100, 1.5700, 1.6576],
-         [28.1106, -3.2869, -1.3033, 1.4700, 2.2300, 1.4800, 4.8476],
-         [23.4630, -25.2382, -1.7361, 1.5600, 3.4800, 1.4000, 4.9676],
-         [29.9235, -12.3342, -1.6218, 1.7400, 3.7700, 1.4800, 0.4876]])
-    points, rot_mat_T = boxes.rotate(0.13603681398218053, points)
+    expected_tensor = torch.tensor(
+        [[1.4225, -2.7344, -1.7501, 1.7500, 3.3900, 1.6500,
+          1.7976 - np.pi + 0.13603681398218053 * 2],
+         [8.5435, -3.6491, -1.6357, 1.5400, 4.0100, 1.5700,
+          1.6576 - np.pi + 0.13603681398218053 * 2],
+         [28.1106, -3.2869, -1.3033, 1.4700, 2.2300, 1.4800,
+          4.8476 - np.pi + 0.13603681398218053 * 2],
+         [23.4630, -25.2382, -1.7361, 1.5600, 3.4800, 1.4000,
+          4.9676 - np.pi + 0.13603681398218053 * 2],
+         [29.9235, -12.3342, -1.6218, 1.7400, 3.7700, 1.4800,
+          0.4876 - np.pi + 0.13603681398218053 * 2]])
+    points, rot_mat_T = boxes.rotate(-0.13603681398218053, points)
     expected_points = torch.tensor([[-1.1526, 0.8403, -1.4658],
                                     [-4.6181, 1.5187, -1.3857],
                                     [-6.6775, 0.6600, -0.9697],
@@ -248,7 +317,7 @@ def test_lidar_boxes3d():
     assert torch.allclose(rot_mat_T, expected_rot_mat_T, 1e-3)

     # with input torch.Tensor points and rotation matrix
-    points, rot_mat_T = boxes.rotate(-0.13603681398218053, points)  # back
+    points, rot_mat_T = boxes.rotate(0.13603681398218053, points)  # back
     rot_mat = np.array([[0.99076125, -0.13561762, 0.],
                         [0.13561762, 0.99076125, 0.], [0., 0., 1.]])
     points, rot_mat_T = boxes.rotate(rot_mat, points)
@@ -262,7 +331,7 @@ def test_lidar_boxes3d():
                             [-6.5263, 1.5595, -0.9697],
                             [-0.4809, 0.7073, -0.5265],
                             [-4.5623, 0.7166, -1.4741]])
-    points_np, rot_mat_T_np = boxes.rotate(0.13603681398218053, points_np)
+    points_np, rot_mat_T_np = boxes.rotate(-0.13603681398218053, points_np)
     expected_points_np = np.array([[-0.8844, 1.1191, -1.4658],
                                    [-4.0401, 2.7039, -1.3857],
                                    [-6.2545, 2.4302, -0.9697],
@@ -276,7 +345,7 @@ def test_lidar_boxes3d():
     assert np.allclose(rot_mat_T_np, expected_rot_mat_T_np, 1e-3)

     # with input LiDARPoints and rotation matrix
-    points_np, rot_mat_T_np = boxes.rotate(-0.13603681398218053, points_np)
+    points_np, rot_mat_T_np = boxes.rotate(0.13603681398218053, points_np)
     lidar_points = LiDARPoints(points_np)
     lidar_points, rot_mat_T_np = boxes.rotate(rot_mat, lidar_points)
     points_np = lidar_points.tensor.numpy()
@@ -287,27 +356,27 @@ def test_lidar_boxes3d():
     # test box scaling
     expected_tensor = torch.tensor([[1.0443488, -2.9183323, -1.7599131,
                                      1.7597977, 3.4089797, 1.6592377,
-                                     1.9336663],
+                                     1.9336663 - np.pi],
                                     [8.014273, -4.8007393, -1.6448704,
                                      1.5486219, 4.0324507, 1.57879,
-                                     1.7936664],
+                                     1.7936664 - np.pi],
                                     [27.558605, -7.1084175, -1.310622,
                                      1.4782301, 2.242485, 1.488286,
-                                     4.9836664],
+                                     4.9836664 - np.pi],
                                     [19.934517, -28.344835, -1.7457767,
                                      1.5687338, 3.4994833, 1.4078381,
-                                     5.1036663],
+                                     5.1036663 - np.pi],
                                     [28.130915, -16.369587, -1.6308585,
                                      1.7497417, 3.791107, 1.488286,
-                                     0.6236664]])
+                                     0.6236664 - np.pi]])
     boxes.scale(1.00559866335275)
     assert torch.allclose(boxes.tensor, expected_tensor)
@@ -315,32 +384,39 @@ def test_lidar_boxes3d():
     # test box translation
     expected_tensor = torch.tensor([[1.1281544, -3.0507944, -1.9169292,
                                      1.7597977, 3.4089797, 1.6592377,
-                                     1.9336663],
+                                     1.9336663 - np.pi],
                                     [8.098079, -4.9332013, -1.8018866,
                                      1.5486219, 4.0324507, 1.57879,
-                                     1.7936664],
+                                     1.7936664 - np.pi],
                                     [27.64241, -7.2408795, -1.4676381,
                                      1.4782301, 2.242485, 1.488286,
-                                     4.9836664],
+                                     4.9836664 - np.pi],
                                     [20.018322, -28.477297, -1.9027928,
                                      1.5687338, 3.4994833, 1.4078381,
-                                     5.1036663],
+                                     5.1036663 - np.pi],
                                     [28.21472, -16.502048, -1.7878747,
                                      1.7497417, 3.791107, 1.488286,
-                                     0.6236664]])
+                                     0.6236664 - np.pi]])
     boxes.translate([0.0838056, -0.13246193, -0.15701613])
     assert torch.allclose(boxes.tensor, expected_tensor)

     # test bbox in_range_bev
+    expected_tensor = torch.tensor(
+        [[1.1282, -3.0508, 1.7598, 3.4090, -1.2079],
+         [8.0981, -4.9332, 1.5486, 4.0325, -1.3479],
+         [27.6424, -7.2409, 1.4782, 2.2425, 1.8421],
+         [20.0183, -28.4773, 1.5687, 3.4995, 1.9621],
+         [28.2147, -16.5020, 1.7497, 3.7911, -2.5179]])
+    assert torch.allclose(boxes.bev, expected_tensor, atol=1e-3)
     expected_tensor = torch.tensor([1, 1, 1, 1, 1], dtype=torch.bool)
     mask = boxes.in_range_bev([0., -40., 70.4, 40.])
     assert (mask == expected_tensor).all()
@@ -356,17 +432,17 @@ def test_lidar_boxes3d():
     index_boxes = boxes[2:5]
     expected_tensor = torch.tensor([[27.64241, -7.2408795, -1.4676381,
                                      1.4782301, 2.242485, 1.488286,
-                                     4.9836664],
+                                     4.9836664 - np.pi],
                                     [20.018322, -28.477297, -1.9027928,
                                      1.5687338, 3.4994833, 1.4078381,
-                                     5.1036663],
+                                     5.1036663 - np.pi],
                                     [28.21472, -16.502048, -1.7878747,
                                      1.7497417, 3.791107, 1.488286,
-                                     0.6236664]])
+                                     0.6236664 - np.pi]])
     assert len(index_boxes) == 3
     assert torch.allclose(index_boxes.tensor, expected_tensor)
@@ -374,7 +450,7 @@ def test_lidar_boxes3d():
     index_boxes = boxes[2]
     expected_tensor = torch.tensor([[27.64241, -7.2408795, -1.4676381,
                                      1.4782301, 2.242485, 1.488286,
-                                     4.9836664]])
+                                     4.9836664 - np.pi]])
     assert len(index_boxes) == 1
     assert torch.allclose(index_boxes.tensor, expected_tensor)
@@ -382,12 +458,12 @@ def test_lidar_boxes3d():
     index_boxes = boxes[[2, 4]]
    expected_tensor = torch.tensor([[27.64241, -7.2408795, -1.4676381,
                                     1.4782301, 2.242485, 1.488286,
-                                    4.9836664],
+                                    4.9836664 - np.pi],
                                    [28.21472, -16.502048, -1.7878747,
                                     1.7497417, 3.791107, 1.488286,
-                                    0.6236664]])
+                                    0.6236664 - np.pi]])
     assert len(index_boxes) == 2
     assert torch.allclose(index_boxes.tensor, expected_tensor)
@@ -408,13 +484,13 @@ def test_lidar_boxes3d():
     assert (boxes.tensor[:, 6] >= -np.pi / 2).all()

     Box3DMode.convert(boxes, Box3DMode.LIDAR, Box3DMode.LIDAR)
-    expected_tesor = boxes.tensor.clone()
-    assert torch.allclose(expected_tesor, boxes.tensor)
+    expected_tensor = boxes.tensor.clone()
+    assert torch.allclose(expected_tensor, boxes.tensor)
     boxes.flip()
     boxes.flip()
     boxes.limit_yaw()
-    assert torch.allclose(expected_tesor, boxes.tensor)
+    assert torch.allclose(expected_tensor, boxes.tensor)

     # test nearest_bev
     expected_tensor = torch.tensor([[-0.5763, -3.9307, 2.8326, -2.1709],
@@ -422,52 +498,50 @@ def test_lidar_boxes3d():
                                     [26.5212, -7.9800, 28.7637, -6.5018],
                                     [18.2686, -29.2617, 21.7681, -27.6929],
                                     [27.3398, -18.3976, 29.0896, -14.6065]])
     # the pytorch print loses some precision
     assert torch.allclose(boxes.nearest_bev, expected_tensor, rtol=1e-4,
                           atol=1e-7)
-    # obtained by the print of the original implementation
-    expected_tensor = torch.tensor(
-        [[[2.4093e+00, -4.4784e+00, -1.9169e+00],
-          [2.4093e+00, -4.4784e+00, -2.5769e-01],
-          [-7.7767e-01, -3.2684e+00, -2.5769e-01],
-          [-7.7767e-01, -3.2684e+00, -1.9169e+00],
-          [3.0340e+00, -2.8332e+00, -1.9169e+00],
-          [3.0340e+00, -2.8332e+00, -2.5769e-01],
-          [-1.5301e-01, -1.6232e+00, -2.5769e-01],
-          [-1.5301e-01, -1.6232e+00, -1.9169e+00]],
-         [[9.8933e+00, -6.1340e+00, -1.8019e+00],
-          [9.8933e+00, -6.1340e+00, -2.2310e-01],
-          [5.9606e+00, -5.2427e+00, -2.2310e-01],
-          [5.9606e+00, -5.2427e+00, -1.8019e+00],
-          [1.0236e+01, -4.6237e+00, -1.8019e+00],
-          [1.0236e+01, -4.6237e+00, -2.2310e-01],
-          [6.3029e+00, -3.7324e+00, -2.2310e-01],
-          [6.3029e+00, -3.7324e+00, -1.8019e+00]],
-         [[2.8525e+01, -8.2534e+00, -1.4676e+00],
-          [2.8525e+01, -8.2534e+00, 2.0648e-02],
-          [2.6364e+01, -7.6525e+00, 2.0648e-02],
-          [2.6364e+01, -7.6525e+00, -1.4676e+00],
-          [2.8921e+01, -6.8292e+00, -1.4676e+00],
-          [2.8921e+01, -6.8292e+00, 2.0648e-02],
-          [2.6760e+01, -6.2283e+00, 2.0648e-02],
-          [2.6760e+01, -6.2283e+00, -1.4676e+00]],
-         [[2.1337e+01, -2.9870e+01, -1.9028e+00],
-          [2.1337e+01, -2.9870e+01, -4.9495e-01],
-          [1.8102e+01, -2.8535e+01, -4.9495e-01],
-          [1.8102e+01, -2.8535e+01, -1.9028e+00],
-          [2.1935e+01, -2.8420e+01, -1.9028e+00],
-          [2.1935e+01, -2.8420e+01, -4.9495e-01],
-          [1.8700e+01, -2.7085e+01, -4.9495e-01],
-          [1.8700e+01, -2.7085e+01, -1.9028e+00]],
-         [[2.6398e+01, -1.7530e+01, -1.7879e+00],
-          [2.6398e+01, -1.7530e+01, -2.9959e-01],
-          [2.8612e+01, -1.4452e+01, -2.9959e-01],
-          [2.8612e+01, -1.4452e+01, -1.7879e+00],
-          [2.7818e+01, -1.8552e+01, -1.7879e+00],
-          [2.7818e+01, -1.8552e+01, -2.9959e-01],
-          [3.0032e+01, -1.5474e+01, -2.9959e-01],
-          [3.0032e+01, -1.5474e+01, -1.7879e+00]]])
+    expected_tensor = torch.tensor(
+        [[[-7.7767e-01, -2.8332e+00, -1.9169e+00],
+          [-7.7767e-01, -2.8332e+00, -2.5769e-01],
+          [2.4093e+00, -1.6232e+00, -2.5769e-01],
+          [2.4093e+00, -1.6232e+00, -1.9169e+00],
+          [-1.5301e-01, -4.4784e+00, -1.9169e+00],
+          [-1.5301e-01, -4.4784e+00, -2.5769e-01],
+          [3.0340e+00, -3.2684e+00, -2.5769e-01],
+          [3.0340e+00, -3.2684e+00, -1.9169e+00]],
+         [[5.9606e+00, -4.6237e+00, -1.8019e+00],
+          [5.9606e+00, -4.6237e+00, -2.2310e-01],
+          [9.8933e+00, -3.7324e+00, -2.2310e-01],
+          [9.8933e+00, -3.7324e+00, -1.8019e+00],
+          [6.3029e+00, -6.1340e+00, -1.8019e+00],
+          [6.3029e+00, -6.1340e+00, -2.2310e-01],
+          [1.0236e+01, -5.2427e+00, -2.2310e-01],
+          [1.0236e+01, -5.2427e+00, -1.8019e+00]],
+         [[2.6364e+01, -6.8292e+00, -1.4676e+00],
+          [2.6364e+01, -6.8292e+00, 2.0648e-02],
+          [2.8525e+01, -6.2283e+00, 2.0648e-02],
+          [2.8525e+01, -6.2283e+00, -1.4676e+00],
+          [2.6760e+01, -8.2534e+00, -1.4676e+00],
+          [2.6760e+01, -8.2534e+00, 2.0648e-02],
+          [2.8921e+01, -7.6525e+00, 2.0648e-02],
+          [2.8921e+01, -7.6525e+00, -1.4676e+00]],
+         [[1.8102e+01, -2.8420e+01, -1.9028e+00],
+          [1.8102e+01, -2.8420e+01, -4.9495e-01],
+          [2.1337e+01, -2.7085e+01, -4.9495e-01],
+          [2.1337e+01, -2.7085e+01, -1.9028e+00],
+          [1.8700e+01, -2.9870e+01, -1.9028e+00],
+          [1.8700e+01, -2.9870e+01, -4.9495e-01],
+          [2.1935e+01, -2.8535e+01, -4.9495e-01],
+          [2.1935e+01, -2.8535e+01, -1.9028e+00]],
+         [[2.8612e+01, -1.8552e+01, -1.7879e+00],
+          [2.8612e+01, -1.8552e+01, -2.9959e-01],
+          [2.6398e+01, -1.5474e+01, -2.9959e-01],
+          [2.6398e+01, -1.5474e+01, -1.7879e+00],
+          [3.0032e+01, -1.7530e+01, -1.7879e+00],
+          [3.0032e+01, -1.7530e+01, -2.9959e-01],
+          [2.7818e+01, -1.4452e+01, -2.9959e-01],
+          [2.7818e+01, -1.4452e+01, -1.7879e+00]]])
     # the pytorch print loses some precision
     assert torch.allclose(boxes.corners, expected_tensor, rtol=1e-4,
                           atol=1e-7)

     # test new_box
@@ -558,31 +632,32 @@ def test_boxes_conversion():
                 [0.000000e+00, 0.000000e+00, 0.000000e+00, 1.000000e+00]],
                dtype=torch.float32)

+    # coord sys refactor (reverse sign of yaw)
     expected_tensor = torch.tensor(
-        [[2.16902434e+01, -4.06038554e-02, -1.61906639e+00, 1.65999997e+00,
-          3.20000005e+00, 1.61000001e+00, -1.53999996e+00],
-         [7.05006905e+00, -6.57459601e+00, -1.60107949e+00, 2.27999997e+00,
-          1.27799997e+01, 3.66000009e+00, 1.54999995e+00],
-         [2.24698818e+01, -6.69203759e+00, -1.50118145e+00, 2.31999993e+00,
-          1.47299995e+01, 3.64000010e+00, 1.59000003e+00],
-         [3.48291965e+01, -7.09058388e+00, -1.36622983e+00, 2.31999993e+00,
-          1.00400000e+01, 3.60999990e+00, 1.61000001e+00],
-         [4.62394617e+01, -7.75838800e+00, -1.32405020e+00, 2.33999991e+00,
-          1.28299999e+01, 3.63000011e+00, 1.63999999e+00]],
+        [[2.16902434e+01, -4.06038554e-02, -1.61906639e+00, 3.20000005e+00,
+          1.65999997e+00, 1.61000001e+00, 1.53999996e+00 - np.pi / 2],
+         [7.05006905e+00, -6.57459601e+00, -1.60107949e+00, 1.27799997e+01,
+          2.27999997e+00, 3.66000009e+00, -1.54999995e+00 - np.pi / 2],
+         [2.24698818e+01, -6.69203759e+00, -1.50118145e+00, 1.47299995e+01,
+          2.31999993e+00, 3.64000010e+00, -1.59000003e+00 + 3 * np.pi / 2],
+         [3.48291965e+01, -7.09058388e+00, -1.36622983e+00, 1.00400000e+01,
+          2.31999993e+00, 3.60999990e+00, -1.61000001e+00 + 3 * np.pi / 2],
+         [4.62394617e+01, -7.75838800e+00, -1.32405020e+00, 1.28299999e+01,
+          2.33999991e+00, 3.63000011e+00, -1.63999999e+00 + 3 * np.pi / 2]],
         dtype=torch.float32)

     rt_mat = rect @ Trv2c
-    # test coversion with Box type
+    # test conversion with Box type
     cam_to_lidar_box = Box3DMode.convert(camera_boxes, Box3DMode.CAM,
                                          Box3DMode.LIDAR, rt_mat.inverse())
     assert torch.allclose(cam_to_lidar_box.tensor, expected_tensor)
@@ -637,10 +712,15 @@


 def test_camera_boxes3d():
     # Test init with numpy array
-    np_boxes = np.array(
-        [[1.7802081, 2.516249, -1.7501148, 1.75, 3.39, 1.65, 1.48],
-         [8.959413, 2.4567227, -1.6357126, 1.54, 4.01, 1.57, 1.62]],
-        dtype=np.float32)
+    np_boxes = np.array(
+        [[1.7802081, 2.516249, -1.7501148, 1.75, 3.39, 1.65,
+          1.48 - 0.13603681398218053 * 4 - 2 * np.pi],
+         [8.959413, 2.4567227, -1.6357126, 1.54, 4.01, 1.57,
+          1.62 - 0.13603681398218053 * 4 - 2 * np.pi]],
+        dtype=np.float32)
     boxes_1 = Box3DMode.convert(LiDARInstance3DBoxes(np_boxes),
                                 Box3DMode.LIDAR, Box3DMode.CAM)
@@ -654,15 +734,15 @@ def test_camera_boxes3d():
     th_boxes = torch.tensor(
         [[28.29669987, -0.5557558, -1.30332506, 1.47000003, 2.23000002,
-          1.48000002, -1.57000005],
+          1.48000002,
+          -1.57000005 - 0.13603681398218053 * 4 - 2 * np.pi],
          [26.66901946, 21.82302134, -1.73605708, 1.55999994, 3.48000002,
-          1.39999998, -1.69000006],
+          1.39999998,
+          -1.69000006 - 0.13603681398218053 * 4 - 2 * np.pi],
          [31.31977974, 8.16214412, -1.62177875, 1.74000001, 3.76999998,
-          1.48000002, 2.78999996]],
+          1.48000002,
+          2.78999996 - 0.13603681398218053 * 4 - 2 * np.pi]],
         dtype=torch.float32)
     cam_th_boxes = Box3DMode.convert(th_boxes, Box3DMode.LIDAR, Box3DMode.CAM)
@@ -675,13 +755,26 @@ def test_camera_boxes3d():
     # test box concatenation
     expected_tensor = Box3DMode.convert(
-        torch.tensor(
-            [[1.7802081, 2.516249, -1.7501148, 1.75, 3.39, 1.65, 1.48],
-             [8.959413, 2.4567227, -1.6357126, 1.54, 4.01, 1.57, 1.62],
-             [28.2967, -0.5557558, -1.303325, 1.47, 2.23, 1.48, -1.57],
-             [26.66902, 21.82302, -1.736057, 1.56, 3.48, 1.4, -1.69],
-             [31.31978, 8.162144, -1.6217787, 1.74, 3.77, 1.48, 2.79]]),
-        Box3DMode.LIDAR, Box3DMode.CAM)
+        torch.tensor(
+            [[1.7802081, 2.516249, -1.7501148, 1.75, 3.39, 1.65,
+              1.48 - 0.13603681398218053 * 4 - 2 * np.pi],
+             [8.959413, 2.4567227, -1.6357126, 1.54, 4.01, 1.57,
+              1.62 - 0.13603681398218053 * 4 - 2 * np.pi],
+             [28.2967, -0.5557558, -1.303325, 1.47, 2.23, 1.48,
+              -1.57 - 0.13603681398218053 * 4 - 2 * np.pi],
+             [26.66902, 21.82302, -1.736057, 1.56, 3.48, 1.4,
+              -1.69 - 0.13603681398218053 * 4 - 2 * np.pi],
+             [31.31978, 8.162144, -1.6217787, 1.74, 3.77, 1.48,
+              2.79 - 0.13603681398218053 * 4 - 2 * np.pi]]),
+        Box3DMode.LIDAR, Box3DMode.CAM)
     boxes = CameraInstance3DBoxes.cat([boxes_1, boxes_2])
     assert torch.allclose(boxes.tensor, expected_tensor)
@@ -690,28 +783,60 @@ def test_camera_boxes3d():
                            [-0.2517, 0.9697, 6.7053],
                            [0.5520, 0.5265, 0.6533],
                            [-0.5358, 1.4741, 4.5870]])
     expected_tensor = Box3DMode.convert(
-        torch.tensor(
-            [[1.7802081, -2.516249, -1.7501148, 1.75, 3.39, 1.65, 1.6615927],
-             [8.959413, -2.4567227, -1.6357126, 1.54, 4.01, 1.57, 1.5215927],
-             [28.2967, 0.5557558, -1.303325, 1.47, 2.23, 1.48, 4.7115927],
-             [26.66902, -21.82302, -1.736057, 1.56, 3.48, 1.4, 4.8315926],
-             [31.31978, -8.162144, -1.6217787, 1.74, 3.77, 1.48,
-              0.35159278]]),
-        Box3DMode.LIDAR, Box3DMode.CAM)
+        torch.tensor(
+            [[1.7802081, -2.516249, -1.7501148, 1.75, 3.39, 1.65,
+              1.6615927 + 0.13603681398218053 * 4 - np.pi],
+             [8.959413, -2.4567227, -1.6357126, 1.54, 4.01, 1.57,
+              1.5215927 + 0.13603681398218053 * 4 - np.pi],
+             [28.2967, 0.5557558, -1.303325, 1.47, 2.23, 1.48,
+              4.7115927 + 0.13603681398218053 * 4 - np.pi],
+             [26.66902, -21.82302, -1.736057, 1.56, 3.48, 1.4,
+              4.8315926 + 0.13603681398218053 * 4 - np.pi],
+             [31.31978, -8.162144, -1.6217787, 1.74, 3.77, 1.48,
+              0.35159278 + 0.13603681398218053 * 4 - np.pi]]),
+        Box3DMode.LIDAR, Box3DMode.CAM)
     points = boxes.flip('horizontal', points)
     expected_points = torch.tensor([[-0.6762, 1.4658, 1.2559],
                                     [-0.8784, 1.3857, 4.7814],
                                     [0.2517, 0.9697, 6.7053],
                                     [-0.5520, 0.5265, 0.6533],
                                     [0.5358, 1.4741, 4.5870]])

-    assert torch.allclose(boxes.tensor, expected_tensor)
+    yaw_normalized_tensor = boxes.tensor.clone()
+    yaw_normalized_tensor[:, -1:] = limit_period(
+        yaw_normalized_tensor[:, -1:], period=np.pi * 2)
+    assert torch.allclose(yaw_normalized_tensor, expected_tensor, 1e-3)
     assert torch.allclose(points, expected_points, 1e-3)

     expected_tensor = torch.tensor(
-        [[2.5162, 1.7501, -1.7802, 3.3900, 1.6500, 1.7500, -1.6616],
-         [2.4567, 1.6357, -8.9594, 4.0100, 1.5700, 1.5400, -1.5216],
-         [-0.5558, 1.3033, -28.2967, 2.2300, 1.4800, 1.4700, -4.7116],
-         [21.8230, 1.7361, -26.6690, 3.4800, 1.4000, 1.5600, -4.8316],
-         [8.1621, 1.6218, -31.3198, 3.7700, 1.4800, 1.7400, -0.3516]])
+        [[2.5162, 1.7501, -1.7802, 1.7500, 1.6500, 3.3900,
+          1.6616 + 0.13603681398218053 * 4 - np.pi / 2],
+         [2.4567, 1.6357, -8.9594, 1.5400, 1.5700, 4.0100,
+          1.5216 + 0.13603681398218053 * 4 - np.pi / 2],
+         [-0.5558, 1.3033, -28.2967, 1.4700, 1.4800, 2.2300,
+          4.7116 + 0.13603681398218053 * 4 - np.pi / 2],
+         [21.8230, 1.7361, -26.6690, 1.5600, 1.4000, 3.4800,
+          4.8316 + 0.13603681398218053 * 4 - np.pi / 2],
+         [8.1621, 1.6218, -31.3198, 1.7400, 1.4800, 3.7700,
+          0.3516 + 0.13603681398218053 * 4 - np.pi / 2]])
     boxes_flip_vert = boxes.clone()
     points = boxes_flip_vert.flip('vertical', points)
     expected_points = torch.tensor([[-0.6762, 1.4658, -1.2559],
@@ -719,19 +844,38 @@ def test_camera_boxes3d():
                                     [0.2517, 0.9697, -6.7053],
                                     [-0.5520, 0.5265, -0.6533],
                                     [0.5358, 1.4741, -4.5870]])
-    assert torch.allclose(boxes_flip_vert.tensor, expected_tensor, 1e-4)
+    yaw_normalized_tensor = boxes_flip_vert.tensor.clone()
+    yaw_normalized_tensor[:, -1:] = limit_period(
+        yaw_normalized_tensor[:, -1:], period=np.pi * 2)
+    expected_tensor[:, -1:] = limit_period(
+        expected_tensor[:, -1:], period=np.pi * 2)
+    assert torch.allclose(yaw_normalized_tensor, expected_tensor, 1e-4)
     assert torch.allclose(points, expected_points)

     # test box rotation
     # with input torch.Tensor points and angle
     expected_tensor = Box3DMode.convert(
-        torch.tensor(
-            [[1.4225, -2.7344, -1.7501, 1.7500, 3.3900, 1.6500, 1.7976],
-             [8.5435, -3.6491, -1.6357, 1.5400, 4.0100, 1.5700, 1.6576],
-             [28.1106, -3.2869, -1.3033, 1.4700, 2.2300, 1.4800, 4.8476],
-             [23.4630, -25.2382, -1.7361, 1.5600, 3.4800, 1.4000, 4.9676],
-             [29.9235, -12.3342, -1.6218, 1.7400, 3.7700, 1.4800, 0.4876]]),
-        Box3DMode.LIDAR, Box3DMode.CAM)
+        torch.tensor(
+            [[1.4225, -2.7344, -1.7501, 1.7500, 3.3900, 1.6500,
+              1.7976 + 0.13603681398218053 * 2 - np.pi],
+             [8.5435, -3.6491, -1.6357, 1.5400, 4.0100, 1.5700,
+              1.6576 + 0.13603681398218053 * 2 - np.pi],
+             [28.1106, -3.2869, -1.3033, 1.4700, 2.2300, 1.4800,
+              4.8476 + 0.13603681398218053 * 2 - np.pi],
+             [23.4630, -25.2382, -1.7361, 1.5600, 3.4800, 1.4000,
+              4.9676 + 0.13603681398218053 * 2 - np.pi],
+             [29.9235, -12.3342, -1.6218, 1.7400, 3.7700, 1.4800,
+              0.4876 + 0.13603681398218053 * 2 - np.pi]]),
+        Box3DMode.LIDAR, Box3DMode.CAM)
     points, rot_mat_T = boxes.rotate(torch.tensor(0.13603681398218053), points)
     expected_points = torch.tensor([[-0.8403, 1.4658, -1.1526],
                                     [-1.5187, 1.3857, -4.6181],
@@ -741,7 +885,12 @@ def test_camera_boxes3d():
     expected_rot_mat_T = torch.tensor([[0.9908, 0.0000, -0.1356],
                                        [0.0000, 1.0000, 0.0000],
                                        [0.1356, 0.0000, 0.9908]])
-    assert torch.allclose(boxes.tensor, expected_tensor, 1e-3)
+    yaw_normalized_tensor = boxes.tensor.clone()
+    yaw_normalized_tensor[:, -1:] = limit_period(
+        yaw_normalized_tensor[:, -1:], period=np.pi * 2)
+    expected_tensor[:, -1:] = limit_period(
+        expected_tensor[:, -1:], period=np.pi * 2)
+    assert torch.allclose(yaw_normalized_tensor, expected_tensor, 1e-3)
     assert torch.allclose(points, expected_points, 1e-3)
     assert torch.allclose(rot_mat_T, expected_rot_mat_T, 1e-3)
@@ -751,7 +900,10 @@ def test_camera_boxes3d():
     rot_mat = np.array([[0.99076125, 0., -0.13561762], [0., 1., 0.],
                         [0.13561762, 0., 0.99076125]])
     points, rot_mat_T = boxes.rotate(rot_mat, points)
-    assert torch.allclose(boxes.tensor, expected_tensor, 1e-3)
+    yaw_normalized_tensor = boxes.tensor.clone()
+    yaw_normalized_tensor[:, -1:] = limit_period(
+        yaw_normalized_tensor[:, -1:], period=np.pi * 2)
+    assert torch.allclose(yaw_normalized_tensor, expected_tensor, 1e-3)
     assert torch.allclose(points, expected_points, 1e-3)
     assert torch.allclose(rot_mat_T, expected_rot_mat_T, 1e-3)
@@ -788,51 +940,61 @@ def test_camera_boxes3d():
     expected_tensor = Box3DMode.convert(
         torch.tensor([[1.0443488, -2.9183323, -1.7599131, 1.7597977,
-                       3.4089797, 1.6592377, 1.9336663],
+                       3.4089797, 1.6592377, 1.9336663 - np.pi],
                       [8.014273, -4.8007393, -1.6448704, 1.5486219,
-                       4.0324507, 1.57879, 1.7936664],
+                       4.0324507, 1.57879, 1.7936664 - np.pi],
                       [27.558605, -7.1084175, -1.310622, 1.4782301,
-                       2.242485, 1.488286, 4.9836664],
+                       2.242485, 1.488286, 4.9836664 - np.pi],
                       [19.934517, -28.344835, -1.7457767, 1.5687338,
-                       3.4994833, 1.4078381, 5.1036663],
+                       3.4994833, 1.4078381, 5.1036663 - np.pi],
                       [28.130915, -16.369587, -1.6308585, 1.7497417,
-                       3.791107, 1.488286, 0.6236664]]),
+                       3.791107, 1.488286, 0.6236664 - np.pi]]),
         Box3DMode.LIDAR, Box3DMode.CAM)
     boxes.scale(1.00559866335275)
-    assert torch.allclose(boxes.tensor, expected_tensor)
+    yaw_normalized_tensor = boxes.tensor.clone()
+    yaw_normalized_tensor[:, -1:] = limit_period(
+        yaw_normalized_tensor[:, -1:], period=np.pi * 2)
+    expected_tensor[:, -1:] = limit_period(
+        expected_tensor[:, -1:], period=np.pi * 2)
+    assert torch.allclose(yaw_normalized_tensor, expected_tensor)

     # test box translation
     expected_tensor = Box3DMode.convert(
         torch.tensor([[1.1281544, -3.0507944, -1.9169292, 1.7597977,
-                       3.4089797, 1.6592377, 1.9336663],
+                       3.4089797, 1.6592377, 1.9336663 - np.pi],
                       [8.098079, -4.9332013, -1.8018866, 1.5486219,
-                       4.0324507, 1.57879, 1.7936664],
+                       4.0324507, 1.57879, 1.7936664 - np.pi],
                       [27.64241, -7.2408795, -1.4676381, 1.4782301,
-                       2.242485, 1.488286, 4.9836664],
+                       2.242485, 1.488286, 4.9836664 - np.pi],
                       [20.018322, -28.477297, -1.9027928, 1.5687338,
-                       3.4994833, 1.4078381, 5.1036663],
+                       3.4994833, 1.4078381, 5.1036663 - np.pi],
                       [28.21472, -16.502048, -1.7878747, 1.7497417,
-                       3.791107, 1.488286, 0.6236664]]),
+                       3.791107, 1.488286, 0.6236664 - np.pi]]),
         Box3DMode.LIDAR, Box3DMode.CAM)
     boxes.translate(torch.tensor([0.13246193, 0.15701613, 0.0838056]))
-    assert torch.allclose(boxes.tensor, expected_tensor)
+    yaw_normalized_tensor = boxes.tensor.clone()
+    yaw_normalized_tensor[:, -1:] = limit_period(
+        yaw_normalized_tensor[:, -1:], period=np.pi * 2)
+    expected_tensor[:, -1:] = limit_period(
+        expected_tensor[:, -1:], period=np.pi * 2)
+    assert torch.allclose(yaw_normalized_tensor, expected_tensor)

     # test bbox in_range_bev
     expected_tensor = torch.tensor([1, 1, 1, 1, 1], dtype=torch.bool)
@@ -846,6 +1008,14 @@ def test_camera_boxes3d():
     mask = boxes.in_range_3d([-2, -5, 0, 20, 2, 22])
     assert (mask == expected_tensor).all()

+    expected_tensor = torch.tensor(
+        [[3.0508, 1.1282, 1.7598, 3.4090, -5.9203],
+         [4.9332, 8.0981, 1.5486, 4.0325, -6.0603],
+         [7.2409, 27.6424, 1.4782, 2.2425, -2.8703],
+         [28.4773, 20.0183, 1.5687, 3.4995, -2.7503],
+         [16.5020, 28.2147, 1.7497, 3.7911, -0.9471]])
+    assert torch.allclose(boxes.bev, expected_tensor, atol=1e-3)
+
     # test properties
     assert torch.allclose(boxes.bottom_center, boxes.tensor[:, :3])
     expected_tensor = (
@@ -858,13 +1028,13 @@ def test_camera_boxes3d():
     assert (boxes.tensor[:, 6] >= -np.pi / 2).all()

     Box3DMode.convert(boxes, Box3DMode.LIDAR, Box3DMode.LIDAR)
-    expected_tesor = boxes.tensor.clone()
-    assert torch.allclose(expected_tesor, boxes.tensor)
+    expected_tensor = boxes.tensor.clone()
+    assert torch.allclose(expected_tensor, boxes.tensor)
     boxes.flip()
     boxes.flip()
     boxes.limit_yaw()
-    assert torch.allclose(expected_tesor, boxes.tensor)
+    assert torch.allclose(expected_tensor, boxes.tensor)

     # test nearest_bev
     # BEV box in lidar coordinates (x, y)
@@ -878,54 +1048,66 @@ def test_camera_boxes3d():
     expected_tensor = lidar_expected_tensor.clone()
     expected_tensor[:, 0::2] = -lidar_expected_tensor[:, [3, 1]]
     expected_tensor[:, 1::2] = lidar_expected_tensor[:, 0::2]
     # the pytorch print loses some precision
     assert torch.allclose(boxes.nearest_bev, expected_tensor, rtol=1e-4,
                           atol=1e-7)
-    # obtained by the print of the original implementation
-    expected_tensor = torch.tensor(
-        [[[3.2684e+00, 2.5769e-01, -7.7767e-01],
-          [1.6232e+00, 2.5769e-01, -1.5301e-01],
-          [1.6232e+00, 1.9169e+00, -1.5301e-01],
-          [3.2684e+00, 1.9169e+00, -7.7767e-01],
-          [4.4784e+00, 2.5769e-01, 2.4093e+00],
-          [2.8332e+00, 2.5769e-01, 3.0340e+00],
-          [2.8332e+00, 1.9169e+00, 3.0340e+00],
-          [4.4784e+00, 1.9169e+00, 2.4093e+00]],
-         [[5.2427e+00, 2.2310e-01, 5.9606e+00],
-          [3.7324e+00, 2.2310e-01, 6.3029e+00],
-          [3.7324e+00, 1.8019e+00, 6.3029e+00],
-          [5.2427e+00, 1.8019e+00, 5.9606e+00],
-          [6.1340e+00, 2.2310e-01, 9.8933e+00],
-          [4.6237e+00, 2.2310e-01, 1.0236e+01],
-          [4.6237e+00, 1.8019e+00, 1.0236e+01],
-          [6.1340e+00, 1.8019e+00, 9.8933e+00]],
-         [[7.6525e+00, -2.0648e-02, 2.6364e+01],
-          [6.2283e+00, -2.0648e-02, 2.6760e+01],
-          [6.2283e+00, 1.4676e+00, 2.6760e+01],
-          [7.6525e+00, 1.4676e+00, 2.6364e+01],
-          [8.2534e+00, -2.0648e-02, 2.8525e+01],
-          [6.8292e+00, -2.0648e-02, 2.8921e+01],
-          [6.8292e+00, 1.4676e+00, 2.8921e+01],
-          [8.2534e+00, 1.4676e+00, 2.8525e+01]],
-         [[2.8535e+01, 4.9495e-01, 1.8102e+01],
-          [2.7085e+01, 4.9495e-01, 1.8700e+01],
-          [2.7085e+01, 1.9028e+00, 1.8700e+01],
-          [2.8535e+01, 1.9028e+00, 1.8102e+01],
-          [2.9870e+01, 4.9495e-01, 2.1337e+01],
-          [2.8420e+01, 4.9495e-01, 2.1935e+01],
-          [2.8420e+01, 1.9028e+00, 2.1935e+01],
-          [2.9870e+01, 1.9028e+00, 2.1337e+01]],
-         [[1.4452e+01, 2.9959e-01, 2.8612e+01],
-          [1.5474e+01, 2.9959e-01, 3.0032e+01],
-          [1.5474e+01, 1.7879e+00, 3.0032e+01],
-          [1.4452e+01, 1.7879e+00, 2.8612e+01],
-          [1.7530e+01, 2.9959e-01, 2.6398e+01],
-          [1.8552e+01, 2.9959e-01, 2.7818e+01],
-          [1.8552e+01, 1.7879e+00, 2.7818e+01],
-          [1.7530e+01, 1.7879e+00, 2.6398e+01]]])
-    # the pytorch print loses some precision
-    assert torch.allclose(boxes.corners, expected_tensor, rtol=1e-4,
-                          atol=1e-7)
+    expected_tensor = torch.tensor(
+        [[[2.8332e+00, 2.5769e-01, -7.7767e-01],
+          [1.6232e+00, 2.5769e-01, 2.4093e+00],
+          [1.6232e+00, 1.9169e+00, 2.4093e+00],
+          [2.8332e+00, 1.9169e+00, -7.7767e-01],
+          [4.4784e+00, 2.5769e-01, -1.5302e-01],
+          [3.2684e+00, 2.5769e-01, 3.0340e+00],
+          [3.2684e+00, 1.9169e+00, 3.0340e+00],
+          [4.4784e+00, 1.9169e+00, -1.5302e-01]],
+         [[4.6237e+00, 2.2310e-01, 5.9606e+00],
+          [3.7324e+00, 2.2310e-01, 9.8933e+00],
+          [3.7324e+00, 1.8019e+00, 9.8933e+00],
+          [4.6237e+00, 1.8019e+00, 5.9606e+00],
+          [6.1340e+00, 2.2310e-01, 6.3029e+00],
+          [5.2427e+00, 2.2310e-01, 1.0236e+01],
+          [5.2427e+00, 1.8019e+00, 1.0236e+01],
+          [6.1340e+00, 1.8019e+00, 6.3029e+00]],
+         [[6.8292e+00, -2.0648e-02, 2.6364e+01],
+          [6.2283e+00, -2.0648e-02, 2.8525e+01],
+          [6.2283e+00, 1.4676e+00, 2.8525e+01],
+          [6.8292e+00, 1.4676e+00, 2.6364e+01],
+          [8.2534e+00, -2.0648e-02, 2.6760e+01],
+          [7.6525e+00, -2.0648e-02, 2.8921e+01],
+          [7.6525e+00, 1.4676e+00, 2.8921e+01],
+          [8.2534e+00, 1.4676e+00, 2.6760e+01]],
+         [[2.8420e+01, 4.9495e-01, 1.8102e+01],
+          [2.7085e+01, 4.9495e-01, 2.1337e+01],
+          [2.7085e+01, 1.9028e+00, 2.1337e+01],
+          [2.8420e+01, 1.9028e+00, 1.8102e+01],
+          [2.9870e+01, 4.9495e-01, 1.8700e+01],
+          [2.8535e+01, 4.9495e-01, 2.1935e+01],
+          [2.8535e+01, 1.9028e+00, 2.1935e+01],
+          [2.9870e+01, 1.9028e+00, 1.8700e+01]],
+         [[1.4452e+01, 2.9959e-01, 2.7818e+01],
+          [1.7530e+01, 2.9959e-01, 3.0032e+01],
+          [1.7530e+01, 1.7879e+00, 3.0032e+01],
+          [1.4452e+01, 1.7879e+00, 2.7818e+01],
+          [1.5474e+01, 2.9959e-01, 2.6398e+01],
+          [1.8552e+01, 2.9959e-01, 2.8612e+01],
+          [1.8552e+01, 1.7879e+00, 2.8612e+01],
+          [1.5474e+01, 1.7879e+00, 2.6398e+01]]])
+    assert torch.allclose(boxes.corners, expected_tensor, rtol=1e-3,
+                          atol=1e-4)

     th_boxes = torch.tensor(
         [[28.29669987, -0.5557558, -1.30332506, 1.47000003, 2.23000002,
           1.48000002, -1.57000005],
          [26.66901946, 21.82302134, -1.73605708, 1.55999994, 3.48000002,
           1.39999998, -1.69000006],
          [31.31977974, 8.16214412, -1.62177875, 1.74000001, 3.76999998,
           1.48000002, 2.78999996]],
         dtype=torch.float32)

     # test init with a given origin
     boxes_origin_given = CameraInstance3DBoxes(
@@ -948,17 +1130,17 @@ def test_boxes3d_overlaps():
     # Test LiDAR boxes 3D overlaps
     boxes1_tensor = torch.tensor(
-        [[1.8, -2.5, -1.8, 1.75, 3.39, 1.65, 1.6615927],
-         [8.9, -2.5, -1.6, 1.54, 4.01, 1.57, 1.5215927],
-         [28.3, 0.5, -1.3, 1.47, 2.23, 1.48, 4.7115927],
-         [31.3, -8.2, -1.6, 1.74, 3.77, 1.48, 0.35]],
+        [[1.8, -2.5, -1.8, 1.75, 3.39, 1.65, -1.6615927],
+         [8.9, -2.5, -1.6, 1.54, 4.01, 1.57, -1.5215927],
+         [28.3, 0.5, -1.3, 1.47, 2.23, 1.48, -4.7115927],
+         [31.3, -8.2, -1.6, 1.74, 3.77, 1.48, -0.35]],
         device='cuda')
     boxes1 = LiDARInstance3DBoxes(boxes1_tensor)

-    boxes2_tensor = torch.tensor([[1.2, -3.0, -1.9, 1.8, 3.4, 1.7, 1.9],
-                                  [8.1, -2.9, -1.8, 1.5, 4.1, 1.6, 1.8],
-                                  [31.3, -8.2, -1.6, 1.74, 3.77, 1.48, 0.35],
-                                  [20.1, -28.5, -1.9, 1.6, 3.5, 1.4, 5.1]],
+    boxes2_tensor = torch.tensor([[1.2, -3.0, -1.9, 1.8, 3.4, 1.7, -1.9],
+                                  [8.1, -2.9, -1.8, 1.5, 4.1, 1.6, -1.8],
+                                  [31.3, -8.2, -1.6, 1.74, 3.77, 1.48, -0.35],
+                                  [20.1, -28.5, -1.9, 1.6, 3.5, 1.4, -5.1]],
                                  device='cuda')
     boxes2 = LiDARInstance3DBoxes(boxes2_tensor)
@@ -1101,6 +1283,7 @@ def test_depth_boxes3d():
         [-2.4016, -3.2521, 0.4426, 0.8234, 0.5325, 1.0099, -0.1215],
         [-2.5181, -2.5298, -0.4321, 0.8597, 0.6193, 1.0204, -0.0493],
         [-1.5434, -2.4951, -0.5570, 0.9385, 2.1404, 0.8954, -0.0585]])
+    expected_tensor[:, -1:] -= 0.022998953275003075 * 2
     points, rot_mat_T = boxes_rot.rotate(-0.022998953275003075, points)
     expected_points = torch.tensor([[-0.7049, -1.2400, -1.4658, 2.5359],
                                     [-0.9881, -4.7599, -1.3857, 0.7167],
@@ -1115,10 +1298,13 @@ def test_depth_boxes3d():
     assert torch.allclose(rot_mat_T, expected_rot_mat_T, 1e-3)

     # with input torch.Tensor points and rotation matrix
-    points, rot_mat_T = boxes.rotate(0.022998953275003075, points)  # back
+    points, rot_mat_T = boxes.rotate(-0.022998953275003075, points)  # back
     rot_mat = np.array([[0.99973554, 0.02299693, 0.],
                         [-0.02299693, 0.99973554, 0.], [0., 0., 1.]])
     points, rot_mat_T = boxes.rotate(rot_mat, points)
+    expected_rot_mat_T = torch.tensor([[0.99973554, 0.02299693, 0.0000],
+                                       [-0.02299693, 0.99973554, 0.0000],
+                                       [0.0000, 0.0000, 1.0000]])
     assert torch.allclose(boxes_rot.tensor, expected_tensor, 1e-3)
     assert torch.allclose(points, expected_points, 1e-3)
     assert torch.allclose(rot_mat_T, expected_rot_mat_T, 1e-3)
@@ -1135,27 +1321,64 @@ def test_depth_boxes3d():
                                    [-0.0974, 6.7093, -0.9697, 0.5599],
                                    [0.5669, 0.6404, -0.5265, 1.0032],
                                    [-0.4302, 4.5981, -1.4741, 0.0556]])
-    expected_rot_mat_T_np = np.array([[0.9997, -0.0230, 0.0000],
-                                      [0.0230, 0.9997, 0.0000],
+    expected_rot_mat_T_np = np.array([[0.99973554, -0.02299693, 0.0000],
+                                      [0.02299693, 0.99973554, 0.0000],
                                       [0.0000, 0.0000, 1.0000]])
+    expected_tensor = torch.tensor(
+        [[-1.5434, -2.4951, -0.5570, 0.9385, 2.1404, 0.8954, -0.0585],
+         [-2.4016, -3.2521, 0.4426, 0.8234, 0.5325, 1.0099, -0.1215],
+         [-2.5181, -2.5298, -0.4321, 0.8597, 0.6193, 1.0204, -0.0493],
+         [-1.5434, -2.4951, -0.5570, 0.9385, 2.1404, 0.8954, -0.0585]])
+    expected_tensor[:, -1:] -= 0.022998953275003075 * 2
     assert torch.allclose(boxes.tensor, expected_tensor, 1e-3)
     assert np.allclose(points_np, expected_points_np, 1e-3)
     assert np.allclose(rot_mat_T_np, expected_rot_mat_T_np, 1e-3)

     # with input DepthPoints and rotation matrix
-    points_np, rot_mat_T_np = boxes.rotate(0.022998953275003075, points_np)
+    points_np, rot_mat_T_np = boxes.rotate(-0.022998953275003075, points_np)
     depth_points = DepthPoints(points_np, points_dim=4)
     depth_points, rot_mat_T_np = boxes.rotate(rot_mat, depth_points)
     points_np = depth_points.tensor.numpy()
+    expected_rot_mat_T_np = expected_rot_mat_T_np.T
     assert torch.allclose(boxes.tensor, expected_tensor, 1e-3)
     assert np.allclose(points_np, expected_points_np, 1e-3)
     assert np.allclose(rot_mat_T_np, expected_rot_mat_T_np, 1e-3)

+    expected_tensor = torch.tensor([[[-2.1217, -3.5105, -0.5570],
+                                     [-2.1217, -3.5105, 0.3384],
+                                     [-1.8985, -1.3818, 0.3384],
+                                     [-1.8985, -1.3818, -0.5570],
+                                     [-1.1883, -3.6084, -0.5570],
+                                     [-1.1883, -3.6084, 0.3384],
+                                     [-0.9651, -1.4796, 0.3384],
+                                     [-0.9651, -1.4796, -0.5570]],
+                                    [[-2.8519, -3.4460, 0.4426],
+                                     [-2.8519, -3.4460, 1.4525],
+                                     [-2.7632, -2.9210, 1.4525],
+                                     [-2.7632, -2.9210, 0.4426],
+                                     [-2.0401, -3.5833, 0.4426],
+                                     [-2.0401, -3.5833, 1.4525],
+                                     [-1.9513, -3.0582, 1.4525],
+                                     [-1.9513, -3.0582, 0.4426]],
+                                    [[-2.9755, -2.7971, -0.4321],
+                                     [-2.9755, -2.7971, 0.5883],
+                                     [-2.9166, -2.1806, 0.5883],
+                                     [-2.9166, -2.1806, -0.4321],
+                                     [-2.1197, -2.8789, -0.4321],
+                                     [-2.1197, -2.8789, 0.5883],
+                                     [-2.0608, -2.2624, 0.5883],
+                                     [-2.0608, -2.2624, -0.4321]],
+                                    [[-2.1217, -3.5105, -0.5570],
+                                     [-2.1217, -3.5105, 0.3384],
+                                     [-1.8985, -1.3818, 0.3384],
+                                     [-1.8985, -1.3818, -0.5570],
+                                     [-1.1883, -3.6084, -0.5570],
+                                     [-1.1883, -3.6084, 0.3384],
+                                     [-0.9651, -1.4796, 0.3384],
+                                     [-0.9651, -1.4796, -0.5570]]])
+    assert torch.allclose(boxes.corners, expected_tensor, 1e-3)
     th_boxes = torch.tensor(
         [[0.61211395, 0.8129094, 0.10563634, 1.497534, 0.16927195,
           0.27956772],
          [1.430009, 0.49797538, 0.9382923, 0.07694054, 0.9312509,
           1.8919173]],
@@ -1182,6 +1405,11 @@ def test_depth_boxes3d():
     mask = boxes.nonempty()
     assert (mask == expected_tensor).all()

+    # test bbox in_range
+    expected_tensor = torch.tensor([0, 1], dtype=torch.bool)
+    mask = boxes.in_range_3d([1, 0, -2, 2, 1, 5])
+    assert (mask == expected_tensor).all()
+
     expected_tensor = torch.tensor([[[-0.1030, 0.6649, 0.1056],
                                      [-0.1030, 0.6649, 0.3852],
                                      [-0.1030, 0.9029, 0.3852],
@@ -1198,11 +1426,11 @@ def test_depth_boxes3d():
                                      [1.5112, -0.0352, 2.8302],
                                      [1.5112, 0.8986, 2.8302],
                                      [1.5112, 0.8986, 0.9383]]])
-    torch.allclose(boxes.corners, expected_tensor)
+    assert torch.allclose(boxes.corners, expected_tensor, 1e-3)

     # test points in boxes
     if torch.cuda.is_available():
-        box_idxs_of_pts = boxes.points_in_boxes(points.cuda())
+        box_idxs_of_pts = boxes.points_in_boxes_all(points.cuda())
         expected_idxs_of_pts = torch.tensor(
             [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0]],
             device='cuda:0',
@@ -1211,8 +1439,8 @@ def test_depth_boxes3d():
     # test get_surface_line_center
     boxes = torch.tensor(
-        [[0.3294, 1.0359, 0.1171, 1.0822, 1.1247, 1.3721, 0.4916],
-         [-2.4630, -2.6324, -0.1616, 0.9202, 1.7896, 0.1992, 0.3185]])
+        [[0.3294, 1.0359, 0.1171, 1.0822, 1.1247, 1.3721, -0.4916],
+         [-2.4630, -2.6324, -0.1616, 0.9202, 1.7896, 0.1992, -0.3185]])
     boxes = DepthInstance3DBoxes(
         boxes, box_dim=boxes.shape[-1], with_yaw=True, origin=(0.5, 0.5, 0.5))

     surface_center, line_center = boxes.get_surface_line_center()
@@ -1260,6 +1488,7 @@


 def test_rotation_3d_in_axis():
+    # clockwise
     points = torch.tensor([[[-0.4599, -0.0471, 0.0000],
                             [-0.4599, -0.0471, 1.8433],
                             [-0.4599, 0.0471, 1.8433]],
@@ -1267,15 +1496,115 @@ def test_rotation_3d_in_axis():
                            [-0.2555, -0.2683, 0.9072],
                            [-0.2555, 0.2683, 0.9072]]])
     rotated = rotation_3d_in_axis(
-        points, torch.tensor([-np.pi / 10, np.pi / 10]), axis=0)
-    expected_rotated = torch.tensor([[[0.0000, -0.4228, -0.1869],
-                                      [1.8433, -0.4228, -0.1869],
-                                      [1.8433, -0.4519, -0.0973]],
-                                     [[0.0000, -0.3259, -0.1762],
-                                      [0.9072, -0.3259, -0.1762],
-                                      [0.9072, -0.1601, 0.3341]]])
+        points,
+        torch.tensor([-np.pi / 10, np.pi / 10]),
+        axis=0,
+        clockwise=True)
+    expected_rotated = torch.tensor(
+        [[[-0.4599, -0.0448, -0.0146],
+          [-0.4599, -0.6144, 1.7385],
+          [-0.4599, -0.5248, 1.7676]],
+         [[-0.2555, -0.2552, 0.0829],
+          [-0.2555, 0.0252, 0.9457],
+          [-0.2555, 0.5355, 0.7799]]],
+        dtype=torch.float32)
     assert torch.allclose(rotated, expected_rotated, atol=1e-3)

+    # anti-clockwise with return rotation mat
+    points = torch.tensor([[[-0.4599, -0.0471, 0.0000],
+                            [-0.4599, -0.0471, 1.8433]]])
+    rotated = rotation_3d_in_axis(points, torch.tensor([np.pi / 2]), axis=0)
+    expected_rotated = torch.tensor([[[-0.4599, 0.0000, -0.0471],
+                                      [-0.4599, -1.8433, -0.0471]]])
+    assert torch.allclose(rotated, expected_rotated, 1e-3)
+
+    points = torch.tensor([[[-0.4599, -0.0471, 0.0000],
+                            [-0.4599, -0.0471, 1.8433]]])
+    rotated, mat = rotation_3d_in_axis(
+        points, torch.tensor([np.pi / 2]), axis=0, return_mat=True)
+    expected_rotated = torch.tensor([[[-0.4599, 0.0000, -0.0471],
+                                      [-0.4599, -1.8433, -0.0471]]])
+    expected_mat = torch.tensor([[[1, 0, 0], [0, 0, 1], [0, -1, 0]]]).float()
+    assert torch.allclose(rotated, expected_rotated, atol=1e-6)
+    assert torch.allclose(mat, expected_mat, atol=1e-6)
+
+    points = torch.tensor([[[-0.4599, -0.0471, 0.0000],
+                            [-0.4599, -0.0471, 1.8433]],
+                           [[-0.2555, -0.2683, 0.0000],
+                            [-0.2555, -0.2683, 0.9072]]])
+    rotated = rotation_3d_in_axis(points, np.pi / 2, axis=0)
+    expected_rotated = torch.tensor([[[-0.4599, 0.0000, -0.0471],
+                                      [-0.4599, -1.8433, -0.0471]],
+                                     [[-0.2555, 0.0000, -0.2683],
+                                      [-0.2555, -0.9072, -0.2683]]])
+    assert torch.allclose(rotated, expected_rotated, atol=1e-3)
+
+    points = np.array([[[-0.4599, -0.0471, 0.0000],
+                        [-0.4599, -0.0471, 1.8433]],
+                       [[-0.2555, -0.2683, 0.0000],
+                        [-0.2555, -0.2683, 0.9072]]]).astype(np.float32)
+    rotated = rotation_3d_in_axis(points, np.pi / 2, axis=0)
+    expected_rotated = np.array([[[-0.4599, 0.0000, -0.0471],
+                                  [-0.4599, -1.8433, -0.0471]],
+                                 [[-0.2555, 0.0000, -0.2683],
+                                  [-0.2555, -0.9072, -0.2683]]])
+    assert np.allclose(rotated, expected_rotated, atol=1e-3)
+
+    points = torch.tensor([[[-0.4599, -0.0471, 0.0000],
+                            [-0.4599, -0.0471, 1.8433]],
+                           [[-0.2555, -0.2683, 0.0000],
+                            [-0.2555, -0.2683, 0.9072]]])
+    angles = [np.pi / 2, -np.pi / 2]
+    rotated = rotation_3d_in_axis(points, angles, axis=0).numpy()
+    expected_rotated = np.array([[[-0.4599, 0.0000, -0.0471],
+                                  [-0.4599, -1.8433, -0.0471]],
+                                 [[-0.2555, 0.0000, 0.2683],
+                                  [-0.2555, 0.9072, 0.2683]]])
+    assert np.allclose(rotated, expected_rotated, atol=1e-3)
+
+    points = torch.tensor([[[-0.4599, -0.0471, 0.0000],
+                            [-0.4599, -0.0471, 1.8433]],
+                           [[-0.2555, -0.2683, 0.0000],
+                            [-0.2555, -0.2683, 0.9072]]])
+    angles = [np.pi / 2, -np.pi / 2]
+    rotated = rotation_3d_in_axis(points, angles, axis=1).numpy()
+    expected_rotated = np.array([[[0.0000, -0.0471, 0.4599],
+                                  [1.8433, -0.0471, 0.4599]],
+                                 [[0.0000, -0.2683, -0.2555],
+                                  [-0.9072, -0.2683, -0.2555]]])
+    assert np.allclose(rotated, expected_rotated, atol=1e-3)
+
+    points = torch.tensor([[[-0.4599, -0.0471, 0.0000],
+                            [-0.4599, 0.0471, 1.8433]],
+                           [[-0.2555, -0.2683, 0.0000],
+                            [0.2555, -0.2683, 0.9072]]])
+    angles = [np.pi / 2, -np.pi / 2]
+    rotated = rotation_3d_in_axis(points, angles, axis=2).numpy()
+    expected_rotated = np.array([[[0.0471, -0.4599, 0.0000],
+                                  [-0.0471, -0.4599, 1.8433]],
+                                 [[-0.2683, 0.2555, 0.0000],
+                                  [-0.2683, -0.2555, 0.9072]]])
+    assert np.allclose(rotated, expected_rotated, atol=1e-3)
+
+    points = torch.tensor([[[-0.0471, 0.0000], [-0.0471, 1.8433]],
+                           [[-0.2683, 0.0000], [-0.2683, 0.9072]]])
+    angles = [np.pi / 2, -np.pi / 2]
+    rotated = rotation_3d_in_axis(points, angles)
+    expected_rotated = np.array([[[0.0000, -0.0471], [-1.8433, -0.0471]],
+                                 [[0.0000, 0.2683], [0.9072, 0.2683]]])
+    assert np.allclose(rotated, expected_rotated, atol=1e-3)
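
# NOTE (editorial, not part of the original tests): the expectations above are
# consistent with rotation_3d_in_axis treating angles as counter-clockwise by
# default (clockwise=True reverses this) and returning a matrix meant to be
# applied on the right, i.e. rotated = points @ mat; the return_mat case maps
# (x, y, z) to (x, -z, y) for a pi / 2 rotation about axis 0.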


def test_rotation_2d():
    angles = np.array([3.14])
    corners = np.array([[[-0.235, -0.49], [-0.235, 0.49], [0.235, 0.49],
                         [0.235, -0.49]]])
    corners_rotated = rotation_3d_in_axis(corners, angles)
    expected_corners = np.array([[[0.2357801, 0.48962511],
                                  [0.2342193, -0.49037365],
                                  [-0.2357801, -0.48962511],
                                  [-0.2342193, 0.49037365]]])
    assert np.allclose(corners_rotated, expected_corners)


def test_limit_period():
    torch.manual_seed(0)
@@ -1285,6 +1614,11 @@ def test_limit_period():
                                     [0.3074]])
     assert torch.allclose(result, expected_result, 1e-3)

+    val = val.numpy()
+    result = limit_period(val)
+    expected_result = expected_result.numpy()
+    assert np.allclose(result, expected_result, 1e-3)
def
test_xywhr2xyxyr
():
torch
.
manual_seed
(
0
)
...
...
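The new NumPy branch in the limit_period hunk above reuses the same expectation. As a rough sketch of the angle-limiting formula being exercised (an assumption based on the usual definition, not a copy of the library source):

# --- Illustrative sketch, not part of the commit ---
import numpy as np

def limit_period_np(val, offset=0.5, period=np.pi):
    # Map each angle into the half-open window
    # [-offset * period, (1 - offset) * period).
    return val - np.floor(val / period + offset) * period

print(limit_period_np(np.array([3.5, -2.0])))  # -> ~[0.3584, 1.1416]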
@@ -1324,3 +1658,139 @@ def test_points_cam2img():
                                         [0.6994, 0.7782],
                                         [0.5623, 0.6303],
                                         [0.4359, 0.6532]])
    assert torch.allclose(point_2d_res, expected_point_2d_res, 1e-3)

    points = points.numpy()
    proj_mat = proj_mat.numpy()
    point_2d_res = points_cam2img(points, proj_mat)
    expected_point_2d_res = expected_point_2d_res.numpy()
    assert np.allclose(point_2d_res, expected_point_2d_res, 1e-3)

    points = torch.from_numpy(points)
    point_2d_res = points_cam2img(points, proj_mat)
    expected_point_2d_res = torch.from_numpy(expected_point_2d_res)
    assert torch.allclose(point_2d_res, expected_point_2d_res, 1e-3)

    point_2d_res = points_cam2img(points, proj_mat, with_depth=True)
    expected_point_2d_res = torch.tensor([[0.5832, 0.6496, 1.7577],
                                          [0.6146, 0.7910, 1.5477],
                                          [0.6994, 0.7782, 2.0091],
                                          [0.5623, 0.6303, 1.8739],
                                          [0.4359, 0.6532, 1.2056]])
    assert torch.allclose(point_2d_res, expected_point_2d_res, 1e-3)
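Shape-wise, the with_depth branch just keeps the third homogeneous coordinate. A rough stand-in for the projection under test, assuming a 4 x 4 proj_mat (the real helper also pads smaller calibration matrices, which is omitted here):

# --- Illustrative sketch, not part of the commit ---
import numpy as np

def project_cam_to_img(points, proj_mat, with_depth=False):
    # points: (N, 3) camera-frame points; proj_mat: (4, 4) projection matrix.
    ones = np.ones((points.shape[0], 1), dtype=points.dtype)
    homo = np.concatenate([points, ones], axis=1)          # (N, 4) homogeneous
    uvw = homo @ proj_mat.T                                # project
    uv = uvw[:, :2] / uvw[:, 2:3]                          # perspective divide
    if with_depth:
        return np.concatenate([uv, uvw[:, 2:3]], axis=1)   # append depth
    return uv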

def test_points_in_boxes():
    if not torch.cuda.is_available():
        pytest.skip('test requires GPU and torch+cuda')
    lidar_pts = torch.tensor([[1.0, 4.3, 0.1], [1.0, 4.4, 0.1],
                              [1.1, 4.3, 0.1], [0.9, 4.3, 0.1],
                              [1.0, -0.3, 0.1], [1.0, -0.4, 0.1],
                              [2.9, 0.1, 6.0], [-0.9, 3.9, 6.0]]).cuda()
    lidar_boxes = torch.tensor(
        [[1.0, 2.0, 0.0, 4.0, 4.0, 6.0, np.pi / 6],
         [1.0, 2.0, 0.0, 4.0, 4.0, 6.0, np.pi / 2],
         [1.0, 2.0, 0.0, 4.0, 4.0, 6.0, 7 * np.pi / 6],
         [1.0, 2.0, 0.0, 4.0, 4.0, 6.0, -np.pi / 6]],
        dtype=torch.float32).cuda()
    lidar_boxes = LiDARInstance3DBoxes(lidar_boxes)

    point_indices = lidar_boxes.points_in_boxes_all(lidar_pts)
    expected_point_indices = torch.tensor(
        [[1, 0, 1, 1], [0, 0, 0, 0], [1, 0, 1, 0], [0, 0, 0, 1],
         [1, 0, 1, 1], [0, 0, 0, 0], [0, 1, 0, 0], [0, 1, 0, 0]],
        dtype=torch.int32).cuda()
    assert point_indices.shape == torch.Size([8, 4])
    assert (point_indices == expected_point_indices).all()

    lidar_pts = torch.tensor([[1.0, 4.3, 0.1], [1.0, 4.4, 0.1],
                              [1.1, 4.3, 0.1], [0.9, 4.3, 0.1],
                              [1.0, -0.3, 0.1], [1.0, -0.4, 0.1],
                              [2.9, 0.1, 6.0], [-0.9, 3.9, 6.0]]).cuda()
    lidar_boxes = torch.tensor(
        [[1.0, 2.0, 0.0, 4.0, 4.0, 6.0, np.pi / 6],
         [1.0, 2.0, 0.0, 4.0, 4.0, 6.0, np.pi / 2],
         [1.0, 2.0, 0.0, 4.0, 4.0, 6.0, 7 * np.pi / 6],
         [1.0, 2.0, 0.0, 4.0, 4.0, 6.0, -np.pi / 6]],
        dtype=torch.float32).cuda()
    lidar_boxes = LiDARInstance3DBoxes(lidar_boxes)
    point_indices = lidar_boxes.points_in_boxes_part(lidar_pts)
    expected_point_indices = torch.tensor([0, -1, 0, 3, 0, -1, 1, 1],
                                          dtype=torch.int32).cuda()
    assert point_indices.shape == torch.Size([8])
    assert (point_indices == expected_point_indices).all()

    depth_boxes = torch.tensor([[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 0.3],
                                [-10.0, 23.0, 16.0, 10, 20, 20, 0.5]],
                               dtype=torch.float32).cuda()
    depth_boxes = DepthInstance3DBoxes(depth_boxes)
    depth_pts = torch.tensor(
        [[[1, 2, 3.3], [1.2, 2.5, 3.0], [0.8, 2.1, 3.5], [1.6, 2.6, 3.6],
          [0.8, 1.2, 3.9], [-9.2, 21.0, 18.2], [3.8, 7.9, 6.3],
          [4.7, 3.5, -12.2], [3.8, 7.6, -2], [-10.6, -12.9, -20],
          [-16, -18, 9], [-21.3, -52, -5], [0, 0, 0], [6, 7, 8],
          [-2, -3, -4]]],
        dtype=torch.float32).cuda()

    point_indices = depth_boxes.points_in_boxes_all(depth_pts)
    expected_point_indices = torch.tensor(
        [[1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [0, 1], [0, 0], [0, 0],
         [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0]],
        dtype=torch.int32).cuda()
    assert point_indices.shape == torch.Size([15, 2])
    assert (point_indices == expected_point_indices).all()

    point_indices = depth_boxes.points_in_boxes_part(depth_pts)
    expected_point_indices = torch.tensor(
        [0, 0, 0, 0, 0, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
        dtype=torch.int32).cuda()
    assert point_indices.shape == torch.Size([15])
    assert (point_indices == expected_point_indices).all()

    depth_boxes = torch.tensor(
        [[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 0.3],
         [-10.0, 23.0, 16.0, 10, 20, 20, 0.5],
         [1.0, 2.0, 0.0, 4.0, 4.0, 6.0, np.pi / 6],
         [1.0, 2.0, 0.0, 4.0, 4.0, 6.0, np.pi / 2],
         [1.0, 2.0, 0.0, 4.0, 4.0, 6.0, 7 * np.pi / 6],
         [1.0, 2.0, 0.0, 4.0, 4.0, 6.0, -np.pi / 6]],
        dtype=torch.float32).cuda()
    cam_boxes = DepthInstance3DBoxes(depth_boxes).convert_to(Box3DMode.CAM)
    depth_pts = torch.tensor(
        [[1, 2, 3.3], [1.2, 2.5, 3.0], [0.8, 2.1, 3.5], [1.6, 2.6, 3.6],
         [0.8, 1.2, 3.9], [-9.2, 21.0, 18.2], [3.8, 7.9, 6.3],
         [4.7, 3.5, -12.2], [3.8, 7.6, -2], [-10.6, -12.9, -20],
         [-16, -18, 9], [-21.3, -52, -5], [0, 0, 0], [6, 7, 8], [-2, -3, -4],
         [1.0, 4.3, 0.1], [1.0, 4.4, 0.1], [1.1, 4.3, 0.1], [0.9, 4.3, 0.1],
         [1.0, -0.3, 0.1], [1.0, -0.4, 0.1], [2.9, 0.1, 6.0],
         [-0.9, 3.9, 6.0]],
        dtype=torch.float32).cuda()
    cam_pts = DepthPoints(depth_pts).convert_to(Coord3DMode.CAM).tensor

    point_indices = cam_boxes.points_in_boxes_all(cam_pts)
    expected_point_indices = torch.tensor(
        [[1, 0, 1, 1, 1, 1], [1, 0, 1, 1, 1, 1], [1, 0, 1, 1, 1, 1],
         [1, 0, 1, 1, 1, 1], [1, 0, 1, 1, 1, 1], [0, 1, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0],
         [0, 0, 0, 1, 0, 1], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0],
         [0, 0, 1, 1, 1, 1], [0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 1],
         [0, 0, 1, 1, 1, 0], [0, 0, 1, 1, 1, 1], [0, 0, 0, 1, 0, 0],
         [1, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0]],
        dtype=torch.int32).cuda()
    assert point_indices.shape == torch.Size([23, 6])
    assert (point_indices == expected_point_indices).all()

    point_indices = cam_boxes.points_in_boxes_batch(cam_pts)
    assert (point_indices == expected_point_indices).all()

    point_indices = cam_boxes.points_in_boxes_part(cam_pts)
    expected_point_indices = torch.tensor(
        [0, 0, 0, 0, 0, 1, -1, -1, -1, -1, -1, -1, 3, -1, -1, 2, 3, 3, 2, 2,
         3, 0, 0],
        dtype=torch.int32).cuda()
    assert point_indices.shape == torch.Size([23])
    assert (point_indices == expected_point_indices).all()

    point_indices = cam_boxes.points_in_boxes(cam_pts)
    assert (point_indices == expected_point_indices).all()
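All of these CUDA ops answer the same geometric question. A CPU-side sketch for a single LiDAR box (x, y, z, dx, dy, dz, yaw), assuming the bottom-center origin the tensors above use (the real ops run as batched CUDA kernels):

# --- Illustrative sketch, not part of the commit ---
import numpy as np

def point_in_lidar_box(pt, box):
    # box: (x, y, z, dx, dy, dz, yaw); z marks the bottom face, yaw is
    # a rotation about +z.
    x, y, z, dx, dy, dz, yaw = box
    # Rotate the offset by -yaw to move the point into the box frame.
    c, s = np.cos(-yaw), np.sin(-yaw)
    lx = c * (pt[0] - x) - s * (pt[1] - y)
    ly = s * (pt[0] - x) + c * (pt[1] - y)
    lz = pt[2] - z
    return abs(lx) <= dx / 2 and abs(ly) <= dy / 2 and 0 <= lz <= dz

print(point_in_lidar_box(np.array([1.0, 4.3, 0.1]),
                         np.array([1.0, 2.0, 0.0, 4.0, 4.0, 6.0, np.pi / 6])))
# True, matching the first entry of the first expected_point_indices row.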
tests/test_utils/test_box_np_ops.py
View file @ 32a4328b
...
...
@@ -20,7 +20,7 @@ def test_camera_to_lidar():

def test_box_camera_to_lidar():
    from mmdet3d.core.bbox.box_np_ops import box_camera_to_lidar
-   box = np.array([[1.84, 1.47, 8.41, 1.2, 1.89, 0.48, 0.01]])
+   box = np.array([[1.84, 1.47, 8.41, 1.2, 1.89, 0.48, -0.01]])
    rect = np.array([[0.9999128, 0.01009263, -0.00851193, 0.],
                     [-0.01012729, 0.9999406, -0.00403767, 0.],
                     [0.00847068, 0.00412352, 0.9999556, 0.],
                     [0., 0., 0.,
...
...
@@ -30,8 +30,9 @@ def test_box_camera_to_lidar():
                     [0.9999753, 0.00693114, -0.0011439, -0.3321029],
                     [0., 0., 0., 1.]])
    box_lidar = box_camera_to_lidar(box, rect, Trv2c)
-   expected_box = np.array(
-       [[8.73138192, -1.85591746, -1.59969933, 0.48, 1.2, 1.89, 0.01]])
+   expected_box = np.array([[8.73138192, -1.85591746, -1.59969933, 1.2,
+                             0.48, 1.89, 0.01 - np.pi / 2]])
    assert np.allclose(box_lidar, expected_box)
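The updated expectation tracks this release's coordinate refactor: the LiDAR sizes become (dx, dy, dz) = (l, w, h) and the yaw flips sign and picks up a -pi / 2 offset. A hedged sketch of the pure axis permutation (the real box_camera_to_lidar also applies the rect and Trv2c calibration, which is why the translation differs slightly):

# --- Illustrative sketch, not part of the commit ---
import numpy as np

def cam_box_to_lidar_approx(box_cam):
    # box_cam: (x, y, z, l, h, w, yaw) in rectified camera coordinates.
    # Pure axis permutation: x_lidar = z_cam, y_lidar = -x_cam,
    # z_lidar = -y_cam; the calibration matrices are deliberately ignored.
    x, y, z, l, h, w, yaw = box_cam
    return np.array([z, -x, -y, l, w, h, -yaw - np.pi / 2])

print(cam_box_to_lidar_approx(
    np.array([1.84, 1.47, 8.41, 1.2, 1.89, 0.48, -0.01])))
# translation ~(8.41, -1.84, -1.47) vs the calibrated (8.731, -1.856, -1.600);
# dims and yaw match the new expected box: (1.2, 0.48, 1.89, 0.01 - pi / 2).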
...
...
@@ -48,22 +49,35 @@ def test_center_to_corner_box2d():
    from mmdet3d.core.bbox.box_np_ops import center_to_corner_box2d
    center = np.array([[9.348705, -3.6271024]])
    dims = np.array([[0.47, 0.98]])
-   angles = np.array([-3.14])
+   angles = np.array([3.14])
    corner = center_to_corner_box2d(center, dims, angles)
    expected_corner = np.array([[[9.584485, -3.1374772],
                                 [9.582925, -4.117476],
                                 [9.112926, -4.1167274],
                                 [9.114486, -3.1367288]]])
    assert np.allclose(corner, expected_corner)

    center = np.array([[-0.0, 0.0]])
    dims = np.array([[4.0, 8.0]])
    angles = np.array([-0.785398])  # -45 degrees
    corner = center_to_corner_box2d(center, dims, angles)
    expected_corner = np.array([[[-4.24264, -1.41421], [1.41421, 4.24264],
                                 [4.24264, 1.41421], [-1.41421, -4.24264]]])
    assert np.allclose(corner, expected_corner)
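The new -45 degree case is easy to verify by hand: the unrotated half-extents of the 4 x 8 box are (+-2, +-4), and rotating one corner counter-clockwise by -0.785398 rad reproduces the expected value (a quick standalone check, assuming the standard CCW rotation convention):

# --- Illustrative sketch, not part of the commit ---
import numpy as np

theta = -0.785398  # -45 degrees, counter-clockwise convention
corner = np.array([-2.0, -4.0])  # one corner of the axis-aligned 4 x 8 box
rot = np.array([[np.cos(theta), -np.sin(theta)],
                [np.sin(theta), np.cos(theta)]])
print(rot @ corner)  # ~[-4.24264, -1.41421], the first expected corner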

def test_points_in_convex_polygon_jit():
    from mmdet3d.core.bbox.box_np_ops import points_in_convex_polygon_jit
    points = np.array([[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]])
    polygons = np.array([[[1.0, 0.0], [0.0, 1.0], [0.0, 0.5], [0.0, 0.0]],
                         [[1.0, 0.0], [1.0, 1.0], [0.5, 1.0], [0.0, 1.0]],
                         [[1.0, 0.0], [0.0, 1.0], [-1.0, 0.0], [0.0, -1.0]]])
    res = points_in_convex_polygon_jit(points, polygons)
    expected_res = np.array([[1, 0, 1], [0, 0, 0], [0, 1, 0]]).astype(np.bool)
    assert np.allclose(res, expected_res)


def test_rotation_2d():
    from mmdet3d.core.bbox.box_np_ops import rotation_2d
    angles = np.array([-3.14])
    corners = np.array([[[-0.235, -0.49], [-0.235, 0.49], [0.235, 0.49],
                         [0.235, -0.49]]])
    corners_rotated = rotation_2d(corners, angles)
    expected_corners = np.array([[[0.2357801, 0.48962511],
                                  [0.2342193, -0.49037365],
                                  [-0.2357801, -0.48962511],
                                  [-0.2342193, 0.49037365]]])
    assert np.allclose(corners_rotated, expected_corners)

    polygons = np.array([[[0.0, 0.0], [0.0, 1.0], [0.5, 0.5], [1.0, 0.0]],
                         [[0.0, 1.0], [1.0, 1.0], [1.0, 0.5], [1.0, 0.0]],
                         [[1.0, 0.0], [0.0, -1.0], [-1.0, 0.0], [0.0, 1.1]]])
    res = points_in_convex_polygon_jit(points, polygons, clockwise=True)
    expected_res = np.array([[1, 0, 1], [0, 0, 1], [0, 1, 0]]).astype(np.bool)
    assert np.allclose(res, expected_res)
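The jitted helper's logic can be sketched with 2D cross products: a point lies inside a convex polygon when it sits on the same side of every edge. A plain-NumPy illustration, assuming counter-clockwise vertex order:

# --- Illustrative sketch, not part of the commit ---
import numpy as np

def point_in_convex_polygon(point, polygon):
    # polygon: (K, 2) vertices in counter-clockwise order.
    edge_start = polygon
    edge_end = np.roll(polygon, -1, axis=0)
    edge_vec = edge_end - edge_start
    to_point = point - edge_start
    # 2D cross product; all non-negative => inside for a CCW polygon.
    cross = edge_vec[:, 0] * to_point[:, 1] - edge_vec[:, 1] * to_point[:, 0]
    return bool(np.all(cross >= 0))

poly = np.array([[1.0, 0.0], [0.0, 1.0], [-1.0, 0.0], [0.0, -1.0]])
print(point_in_convex_polygon(np.array([0.4, 0.4]), poly))
# True, matching expected_res[0][2] for the third polygon above.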
tests/test_utils/test_coord_3d_mode.py
View file @ 32a4328b
...
...
@@ -3,7 +3,8 @@ import numpy as np
import torch
from mmdet3d.core.bbox import (CameraInstance3DBoxes, Coord3DMode,
-                               DepthInstance3DBoxes, LiDARInstance3DBoxes)
+                               DepthInstance3DBoxes, LiDARInstance3DBoxes,
+                               limit_period)
from mmdet3d.core.points import CameraPoints, DepthPoints, LiDARPoints
...
...
@@ -242,22 +243,31 @@ def test_boxes_conversion():
    convert_lidar_boxes = Coord3DMode.convert(cam_boxes, Coord3DMode.CAM,
                                              Coord3DMode.LIDAR)
-   expected_tensor = torch.tensor(
-       [[-1.7501, -1.7802, -2.5162, 1.6500, 1.7500, 3.3900, 1.4800],
-        [-1.6357, -8.9594, -2.4567, 1.5700, 1.5400, 4.0100, 1.6200],
-        [-1.3033, -28.2967, 0.5558, 1.4800, 1.4700, 2.2300, -1.5700],
-        [-1.7361, -26.6690, -21.8230, 1.4000, 1.5600, 3.4800, -1.6900],
-        [-1.6218, -31.3198, -8.1621, 1.4800, 1.7400, 3.7700, 2.7900]])
+   expected_tensor = torch.tensor(
+       [[-1.7501, -1.7802, -2.5162, 1.7500, 1.6500, 3.3900,
+         -1.4800 - np.pi / 2],
+        [-1.6357, -8.9594, -2.4567, 1.5400, 1.5700, 4.0100,
+         -1.6200 - np.pi / 2],
+        [-1.3033, -28.2967, 0.5558, 1.4700, 1.4800, 2.2300,
+         1.5700 - np.pi / 2],
+        [-1.7361, -26.6690, -21.8230, 1.5600, 1.4000, 3.4800,
+         1.6900 - np.pi / 2],
+        [-1.6218, -31.3198, -8.1621, 1.7400, 1.4800, 3.7700,
+         -2.7900 - np.pi / 2]])
+   expected_tensor[:, -1:] = limit_period(
+       expected_tensor[:, -1:], period=np.pi * 2)
    assert torch.allclose(expected_tensor, convert_lidar_boxes.tensor, 1e-3)

    convert_depth_boxes = Coord3DMode.convert(cam_boxes, Coord3DMode.CAM,
                                              Coord3DMode.DEPTH)
    expected_tensor = torch.tensor(
-       [[1.7802, 1.7501, 2.5162, 1.7500, 1.6500, 3.3900, 1.4800],
-        [8.9594, 1.6357, 2.4567, 1.5400, 1.5700, 4.0100, 1.6200],
-        [28.2967, 1.3033, -0.5558, 1.4700, 1.4800, 2.2300, -1.5700],
-        [26.6690, 1.7361, 21.8230, 1.5600, 1.4000, 3.4800, -1.6900],
-        [31.3198, 1.6218, 8.1621, 1.7400, 1.4800, 3.7700, 2.7900]])
+       [[1.7802, -1.7501, -2.5162, 1.7500, 1.6500, 3.3900, -1.4800],
+        [8.9594, -1.6357, -2.4567, 1.5400, 1.5700, 4.0100, -1.6200],
+        [28.2967, -1.3033, 0.5558, 1.4700, 1.4800, 2.2300, 1.5700],
+        [26.6690, -1.7361, -21.8230, 1.5600, 1.4000, 3.4800, 1.6900],
+        [31.3198, -1.6218, -8.1621, 1.7400, 1.4800, 3.7700, -2.7900]])
    assert torch.allclose(expected_tensor, convert_depth_boxes.tensor, 1e-3)
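The rewritten expectations encode the rc0 convention change: relative to the old expected rows, the new ones swap the first two size entries, replace yaw with -yaw - pi / 2, and then renormalize the angle with limit_period. A hedged sketch mapping one old row to its new form (a reconstruction of the bookkeeping the diff encodes, not library code):

# --- Illustrative sketch, not part of the commit ---
import numpy as np

def old_expectation_to_new(row):
    # row: (x, y, z, dx, dy, dz, yaw) as the pre-rc0 test expected it.
    x, y, z, dx, dy, dz, yaw = row
    new = np.array([x, y, z, dy, dx, dz, -yaw - np.pi / 2])
    # Renormalize yaw into [-pi, pi), mirroring limit_period(period=2 * pi).
    new[-1] -= np.floor(new[-1] / (2 * np.pi) + 0.5) * 2 * np.pi
    return new

old = np.array([-1.7501, -1.7802, -2.5162, 1.6500, 1.7500, 3.3900, 1.4800])
print(old_expectation_to_new(old))
# ~[-1.7501, -1.7802, -2.5162, 1.75, 1.65, 3.39, -3.0508]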
    # test LIDAR to CAM and DEPTH
...
...
@@ -269,22 +279,42 @@ def test_boxes_conversion():
                                 [31.31978, 8.162144, -1.6217787, 1.74, 3.77,
                                  1.48, 2.79]])
    convert_cam_boxes = Coord3DMode.convert(lidar_boxes, Coord3DMode.LIDAR,
                                            Coord3DMode.CAM)
-   expected_tensor = torch.tensor(
-       [[-2.5162, 1.7501, 1.7802, 3.3900, 1.6500, 1.7500, 1.4800],
-        [-2.4567, 1.6357, 8.9594, 4.0100, 1.5700, 1.5400, 1.6200],
-        [0.5558, 1.3033, 28.2967, 2.2300, 1.4800, 1.4700, -1.5700],
-        [-21.8230, 1.7361, 26.6690, 3.4800, 1.4000, 1.5600, -1.6900],
-        [-8.1621, 1.6218, 31.3198, 3.7700, 1.4800, 1.7400, 2.7900]])
+   expected_tensor = torch.tensor([
+       [-2.5162, 1.7501, 1.7802, 1.7500, 1.6500, 3.3900, -1.4800 - np.pi / 2],
+       [-2.4567, 1.6357, 8.9594, 1.5400, 1.5700, 4.0100, -1.6200 - np.pi / 2],
+       [0.5558, 1.3033, 28.2967, 1.4700, 1.4800, 2.2300, 1.5700 - np.pi / 2],
+       [-21.8230, 1.7361, 26.6690, 1.5600, 1.4000, 3.4800, 1.6900 - np.pi / 2],
+       [-8.1621, 1.6218, 31.3198, 1.7400, 1.4800, 3.7700, -2.7900 - np.pi / 2]
+   ])
+   expected_tensor[:, -1:] = limit_period(
+       expected_tensor[:, -1:], period=np.pi * 2)
    assert torch.allclose(expected_tensor, convert_cam_boxes.tensor, 1e-3)

    convert_depth_boxes = Coord3DMode.convert(lidar_boxes, Coord3DMode.LIDAR,
                                              Coord3DMode.DEPTH)
-   expected_tensor = torch.tensor(
-       [[-2.5162, 1.7802, -1.7501, 3.3900, 1.7500, 1.6500, 1.4800],
-        [-2.4567, 8.9594, -1.6357, 4.0100, 1.5400, 1.5700, 1.6200],
-        [0.5558, 28.2967, -1.3033, 2.2300, 1.4700, 1.4800, -1.5700],
-        [-21.8230, 26.6690, -1.7361, 3.4800, 1.5600, 1.4000, -1.6900],
-        [-8.1621, 31.3198, -1.6218, 3.7700, 1.7400, 1.4800, 2.7900]])
+   expected_tensor = torch.tensor(
+       [[-2.5162, 1.7802, -1.7501, 1.7500, 3.3900, 1.6500,
+         1.4800 + np.pi / 2],
+        [-2.4567, 8.9594, -1.6357, 1.5400, 4.0100, 1.5700,
+         1.6200 + np.pi / 2],
+        [0.5558, 28.2967, -1.3033, 1.4700, 2.2300, 1.4800,
+         -1.5700 + np.pi / 2],
+        [-21.8230, 26.6690, -1.7361, 1.5600, 3.4800, 1.4000,
+         -1.6900 + np.pi / 2],
+        [-8.1621, 31.3198, -1.6218, 1.7400, 3.7700, 1.4800,
+         2.7900 + np.pi / 2]])
+   expected_tensor[:, -1:] = limit_period(
+       expected_tensor[:, -1:], period=np.pi * 2)
    assert torch.allclose(expected_tensor, convert_depth_boxes.tensor, 1e-3)
    # test DEPTH to CAM and LIDAR
...
...
@@ -297,19 +327,25 @@ def test_boxes_conversion():
    convert_cam_boxes = Coord3DMode.convert(depth_boxes, Coord3DMode.DEPTH,
                                            Coord3DMode.CAM)
    expected_tensor = torch.tensor(
-       [[1.7802, -1.7501, -2.5162, 1.7500, 1.6500, 3.3900, 1.4800],
-        [8.9594, -1.6357, -2.4567, 1.5400, 1.5700, 4.0100, 1.6200],
-        [28.2967, -1.3033, 0.5558, 1.4700, 1.4800, 2.2300, -1.5700],
-        [26.6690, -1.7361, -21.8230, 1.5600, 1.4000, 3.4800, -1.6900],
-        [31.3198, -1.6218, -8.1621, 1.7400, 1.4800, 3.7700, 2.7900]])
+       [[1.7802, 1.7501, 2.5162, 1.7500, 1.6500, 3.3900, -1.4800],
+        [8.9594, 1.6357, 2.4567, 1.5400, 1.5700, 4.0100, -1.6200],
+        [28.2967, 1.3033, -0.5558, 1.4700, 1.4800, 2.2300, 1.5700],
+        [26.6690, 1.7361, 21.8230, 1.5600, 1.4000, 3.4800, 1.6900],
+        [31.3198, 1.6218, 8.1621, 1.7400, 1.4800, 3.7700, -2.7900]])
    assert torch.allclose(expected_tensor, convert_cam_boxes.tensor, 1e-3)

    convert_lidar_boxes = Coord3DMode.convert(depth_boxes, Coord3DMode.DEPTH,
                                              Coord3DMode.LIDAR)
-   expected_tensor = torch.tensor(
-       [[2.5162, -1.7802, -1.7501, 3.3900, 1.7500, 1.6500, 1.4800],
-        [2.4567, -8.9594, -1.6357, 4.0100, 1.5400, 1.5700, 1.6200],
-        [-0.5558, -28.2967, -1.3033, 2.2300, 1.4700, 1.4800, -1.5700],
-        [21.8230, -26.6690, -1.7361, 3.4800, 1.5600, 1.4000, -1.6900],
-        [8.1621, -31.3198, -1.6218, 3.7700, 1.7400, 1.4800, 2.7900]])
+   expected_tensor = torch.tensor(
+       [[2.5162, -1.7802, -1.7501, 1.7500, 3.3900, 1.6500,
+         1.4800 - np.pi / 2],
+        [2.4567, -8.9594, -1.6357, 1.5400, 4.0100, 1.5700,
+         1.6200 - np.pi / 2],
+        [-0.5558, -28.2967, -1.3033, 1.4700, 2.2300, 1.4800,
+         -1.5700 - np.pi / 2],
+        [21.8230, -26.6690, -1.7361, 1.5600, 3.4800, 1.4000,
+         -1.6900 - np.pi / 2],
+        [8.1621, -31.3198, -1.6218, 1.7400, 3.7700, 1.4800,
+         2.7900 - np.pi / 2]])
+   expected_tensor[:, -1:] = limit_period(
+       expected_tensor[:, -1:], period=np.pi * 2)
    assert torch.allclose(expected_tensor, convert_lidar_boxes.tensor, 1e-3)
tests/test_utils/test_points.py
View file @ 32a4328b
...
...
@@ -66,6 +66,7 @@ def test_base_points():
                                     ]])
    assert torch.allclose(expected_tensor, base_points.tensor)
    assert torch.allclose(expected_tensor[:, :2], base_points.bev)
    assert torch.allclose(expected_tensor[:, :3], base_points.coord)
    assert torch.allclose(expected_tensor[:, 3:6], base_points.color)
    assert torch.allclose(expected_tensor[:, 6], base_points.height)
...
...
@@ -327,6 +328,7 @@ def test_cam_points():
                                     ]])
    assert torch.allclose(expected_tensor, cam_points.tensor)
    assert torch.allclose(expected_tensor[:, [0, 2]], cam_points.bev)
    assert torch.allclose(expected_tensor[:, :3], cam_points.coord)
    assert torch.allclose(expected_tensor[:, 3:6], cam_points.color)
    assert torch.allclose(expected_tensor[:, 6], cam_points.height)
...
...
@@ -603,6 +605,7 @@ def test_lidar_points():
                                     ]])
    assert torch.allclose(expected_tensor, lidar_points.tensor)
    assert torch.allclose(expected_tensor[:, :2], lidar_points.bev)
    assert torch.allclose(expected_tensor[:, :3], lidar_points.coord)
    assert torch.allclose(expected_tensor[:, 3:6], lidar_points.color)
    assert torch.allclose(expected_tensor[:, 6], lidar_points.height)
...
...
@@ -879,6 +882,7 @@ def test_depth_points():
                                     ]])
    assert torch.allclose(expected_tensor, depth_points.tensor)
    assert torch.allclose(expected_tensor[:, :2], depth_points.bev)
    assert torch.allclose(expected_tensor[:, :3], depth_points.coord)
    assert torch.allclose(expected_tensor[:, 3:6], depth_points.color)
    assert torch.allclose(expected_tensor[:, 6], depth_points.height)
...
...
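The added bev assertions pin down which two coordinates form each frame's bird's-eye view: x/y for base, LiDAR, and depth points, but x/z for camera points, where y is the vertical axis. A tiny illustration of that slicing, assuming (N, 3+) point tensors laid out as (x, y, z, r, g, b, height):

# --- Illustrative sketch, not part of the commit ---
import torch

pts = torch.arange(21, dtype=torch.float32).reshape(3, 7)
bev_lidar = pts[:, :2]     # LiDAR/depth BEV: the x-y plane
bev_cam = pts[:, [0, 2]]   # camera BEV: the x-z plane (y points down)
print(bev_lidar.shape, bev_cam.shape)  # torch.Size([3, 2]) torch.Size([3, 2])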