OpenDAS / mmdetection3d

Unverified commit d7067e44, authored Dec 03, 2022 by Wenwei Zhang, committed by GitHub on Dec 03, 2022

Bump version to v1.1.0rc2

Bump to v1.1.0rc2

Parents: 28fe73d2, fb0e57e5
Changes: 360 in total. This page shows 20 changed files with 403 additions and 214 deletions (+403 / -214).
tests/test_models/test_detectors/test_imvotenet.py                               +8   -8
tests/test_models/test_detectors/test_imvoxelnet.py                              +5   -5
tests/test_models/test_detectors/test_mvxnet.py                                  +5   -5
tests/test_models/test_detectors/test_parta2.py                                  +8   -9
tests/test_models/test_detectors/test_pointrcnn.py                               +5   -5
tests/test_models/test_detectors/test_pvrcnn.py                                  +63  -0
tests/test_models/test_detectors/test_sassd.py                                   +43  -0
tests/test_models/test_detectors/test_votenet.py                                 +7   -9
tests/test_models/test_detectors/test_voxelnet.py                                +7   -8
tests/test_models/test_losses/test_rotated_iou_loss.py                           +27  -0
tests/test_models/test_necks/test_second_fpn.py                                  +9   -9
tests/test_models/test_task_modules/test_anchor/test_anchor_3d_generator.py      +30  -30
tests/test_models/test_task_modules/test_voxel/test_voxel_generator.py           +7   -9
tools/create_data.py                                                              +5   -9
tools/dataset_converters/create_gt_database.py                                    +3   -3
tools/dataset_converters/lyft_converter.py                                        +1   -0
tools/dataset_converters/s3dis_data_utils.py                                      +1   -1
tools/dataset_converters/update_infos_to_v2.py                                    +148 -90
tools/dataset_converters/waymo_converter.py                                       +10  -11
tools/misc/browse_dataset.py                                                      +11  -3
tests/test_models/test_detectors/test_imvotenet.py

@@ -4,8 +4,8 @@ import torch
 from mmengine import DefaultScope

 from mmdet3d.registry import MODELS
-from tests.utils.model_utils import (_create_detector_inputs,
-                                     _get_detector_cfg, _setup_seed)
+from mmdet3d.testing import (create_detector_inputs, get_detector_cfg,
+                             setup_seed)


 class TestImvoteNet(unittest.TestCase):
@@ -15,12 +15,12 @@ class TestImvoteNet(unittest.TestCase):
         assert hasattr(mmdet3d.models, 'ImVoteNet')
         DefaultScope.get_instance('test_imvotenet_img', scope_name='mmdet3d')

-        _setup_seed(0)
-        votenet_net_cfg = _get_detector_cfg(
+        setup_seed(0)
+        votenet_net_cfg = get_detector_cfg(
             'imvotenet/imvotenet_faster-rcnn-r50_fpn_4xb2_sunrgbd-3d.py')
         model = MODELS.build(votenet_net_cfg)

-        packed_inputs = _create_detector_inputs(
+        packed_inputs = create_detector_inputs(
             with_points=False, with_img=True, img_size=128)

         if torch.cuda.is_available():
@@ -49,12 +49,12 @@ class TestImvoteNet(unittest.TestCase):
         assert hasattr(mmdet3d.models, 'ImVoteNet')
         DefaultScope.get_instance('test_imvotenet', scope_name='mmdet3d')

-        _setup_seed(0)
-        votenet_net_cfg = _get_detector_cfg(
+        setup_seed(0)
+        votenet_net_cfg = get_detector_cfg(
             'imvotenet/imvotenet_stage2_8xb16_sunrgbd-3d.py')
         model = MODELS.build(votenet_net_cfg)

-        packed_inputs = _create_detector_inputs(
+        packed_inputs = create_detector_inputs(
             with_points=True, with_img=True, img_size=128,
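This same import change repeats across the detector tests in this commit: the private helpers `_create_detector_inputs`, `_get_detector_cfg` and `_setup_seed` from `tests.utils.model_utils` are replaced by public equivalents exported from `mmdet3d.testing`. A hypothetical compatibility shim like the one below (not part of this commit, and assuming the new helpers behave identically to the old ones) could keep the old names importable while out-of-tree tests migrate:

# hypothetical tests/utils/model_utils.py shim
from mmdet3d.testing import (create_detector_inputs, get_detector_cfg,
                             setup_seed)

# re-export the public helpers under the old underscore-prefixed names
_create_detector_inputs = create_detector_inputs
_get_detector_cfg = get_detector_cfg
_setup_seed = setup_seed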
tests/test_models/test_detectors/test_imvoxelnet.py

@@ -4,8 +4,8 @@ import torch
 from mmengine import DefaultScope

 from mmdet3d.registry import MODELS
-from tests.utils.model_utils import (_create_detector_inputs,
-                                     _get_detector_cfg, _setup_seed)
+from mmdet3d.testing import (create_detector_inputs, get_detector_cfg,
+                             setup_seed)


 class TestImVoxelNet(unittest.TestCase):
@@ -15,12 +15,12 @@ class TestImVoxelNet(unittest.TestCase):
         assert hasattr(mmdet3d.models, 'ImVoxelNet')
         DefaultScope.get_instance('test_ImVoxelNet', scope_name='mmdet3d')

-        _setup_seed(0)
-        imvoxel_net_cfg = _get_detector_cfg(
+        setup_seed(0)
+        imvoxel_net_cfg = get_detector_cfg(
             'imvoxelnet/imvoxelnet_8xb4_kitti-3d-car.py')
         model = MODELS.build(imvoxel_net_cfg)

         num_gt_instance = 1
-        packed_inputs = _create_detector_inputs(
+        packed_inputs = create_detector_inputs(
             with_points=False, with_img=True, img_size=(128, 128),
tests/test_models/test_detectors/test_mvxnet.py

@@ -4,8 +4,8 @@ import torch
 from mmengine import DefaultScope

 from mmdet3d.registry import MODELS
-from tests.utils.model_utils import (_create_detector_inputs,
-                                     _get_detector_cfg, _setup_seed)
+from mmdet3d.testing import (create_detector_inputs, get_detector_cfg,
+                             setup_seed)


 class TestMVXNet(unittest.TestCase):
@@ -15,14 +15,14 @@ class TestMVXNet(unittest.TestCase):
         assert hasattr(mmdet3d.models, 'DynamicMVXFasterRCNN')

-        _setup_seed(0)
+        setup_seed(0)
         DefaultScope.get_instance('test_mvxnet', scope_name='mmdet3d')
-        mvx_net_cfg = _get_detector_cfg(
+        mvx_net_cfg = get_detector_cfg(
             'mvxnet/mvxnet_fpn_dv_second_secfpn_8xb2-80e_kitti-3d-3class.py'  # noqa
         )
         model = MODELS.build(mvx_net_cfg)
         num_gt_instance = 1
-        packed_inputs = _create_detector_inputs(
+        packed_inputs = create_detector_inputs(
             with_img=False, num_gt_instance=num_gt_instance, points_feat_dim=4)

         if torch.cuda.is_available():
tests/test_models/test_detectors/test_parta2.py

@@ -4,8 +4,8 @@ import torch
 from mmengine import DefaultScope

 from mmdet3d.registry import MODELS
-from tests.utils.model_utils import (_create_detector_inputs,
-                                     _get_detector_cfg, _setup_seed)
+from mmdet3d.testing import (create_detector_inputs, get_detector_cfg,
+                             setup_seed)


 class TestPartA2(unittest.TestCase):
@@ -15,18 +15,17 @@ class TestPartA2(unittest.TestCase):
         assert hasattr(mmdet3d.models, 'PartA2')
         DefaultScope.get_instance('test_parta2', scope_name='mmdet3d')

-        _setup_seed(0)
-        parta2_cfg = _get_detector_cfg(
-            'parta2/PartA2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-3class.py')
+        setup_seed(0)
+        parta2_cfg = get_detector_cfg(
+            'parta2/parta2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-3class.py')
         model = MODELS.build(parta2_cfg)
         num_gt_instance = 2
-        packed_inputs = _create_detector_inputs(num_gt_instance=num_gt_instance)
+        packed_inputs = create_detector_inputs(num_gt_instance=num_gt_instance)

         # TODO: Support aug data test
         # aug_packed_inputs = [
-        #     _create_detector_inputs(num_gt_instance=num_gt_instance),
-        #     _create_detector_inputs(num_gt_instance=num_gt_instance + 1)
+        #     create_detector_inputs(num_gt_instance=num_gt_instance),
+        #     create_detector_inputs(num_gt_instance=num_gt_instance + 1)
         # ]
         # test_aug_test
         # metainfo = {
tests/test_models/test_detectors/test_pointrcnn.py

@@ -4,8 +4,8 @@ import torch
 from mmengine import DefaultScope

 from mmdet3d.registry import MODELS
-from tests.utils.model_utils import (_create_detector_inputs,
-                                     _get_detector_cfg, _setup_seed)
+from mmdet3d.testing import (create_detector_inputs, get_detector_cfg,
+                             setup_seed)


 class TestPointRCNN(unittest.TestCase):
@@ -15,12 +15,12 @@ class TestPointRCNN(unittest.TestCase):
         assert hasattr(mmdet3d.models, 'PointRCNN')
         DefaultScope.get_instance('test_pointrcnn', scope_name='mmdet3d')

-        _setup_seed(0)
-        pointrcnn_cfg = _get_detector_cfg(
+        setup_seed(0)
+        pointrcnn_cfg = get_detector_cfg(
             'point_rcnn/point-rcnn_8xb2_kitti-3d-3class.py')
         model = MODELS.build(pointrcnn_cfg)
         num_gt_instance = 2

-        packed_inputs = _create_detector_inputs(
+        packed_inputs = create_detector_inputs(
             num_points=10101, num_gt_instance=num_gt_instance)

         if torch.cuda.is_available():
tests/test_models/test_detectors/test_pvrcnn.py  (new file, mode 100644)

import unittest

import torch
from mmengine import DefaultScope

from mmdet3d.registry import MODELS
from mmdet3d.testing import (create_detector_inputs, get_detector_cfg,
                             setup_seed)


class TestPVRCNN(unittest.TestCase):

    def test_pvrcnn(self):
        import mmdet3d.models

        assert hasattr(mmdet3d.models, 'PointVoxelRCNN')
        DefaultScope.get_instance('test_pvrcnn', scope_name='mmdet3d')
        setup_seed(0)
        pvrcnn_cfg = get_detector_cfg(
            'pv_rcnn/pv_rcnn_8xb2-80e_kitti-3d-3class.py')
        model = MODELS.build(pvrcnn_cfg)
        num_gt_instance = 2
        packed_inputs = create_detector_inputs(num_gt_instance=num_gt_instance)

        # TODO: Support aug data test
        # aug_packed_inputs = [
        #     create_detector_inputs(num_gt_instance=num_gt_instance),
        #     create_detector_inputs(num_gt_instance=num_gt_instance + 1)
        # ]
        # test_aug_test
        # metainfo = {
        #     'pcd_scale_factor': 1,
        #     'pcd_horizontal_flip': 1,
        #     'pcd_vertical_flip': 1,
        #     'box_type_3d': LiDARInstance3DBoxes
        # }
        # for item in aug_packed_inputs:
        #     for batch_id in len(item['data_samples']):
        #         item['data_samples'][batch_id].set_metainfo(metainfo)

        if torch.cuda.is_available():
            model = model.cuda()
            # test simple_test
            with torch.no_grad():
                data = model.data_preprocessor(packed_inputs, True)
                torch.cuda.empty_cache()
                results = model.forward(**data, mode='predict')
            self.assertEqual(len(results), 1)
            self.assertIn('bboxes_3d', results[0].pred_instances_3d)
            self.assertIn('scores_3d', results[0].pred_instances_3d)
            self.assertIn('labels_3d', results[0].pred_instances_3d)

            # save the memory
            with torch.no_grad():
                losses = model.forward(**data, mode='loss')
                torch.cuda.empty_cache()
            self.assertGreater(losses['loss_rpn_cls'][0], 0)
            self.assertGreaterEqual(losses['loss_rpn_bbox'][0], 0)
            self.assertGreaterEqual(losses['loss_rpn_dir'][0], 0)
            self.assertGreater(losses['loss_semantic'], 0)
            self.assertGreaterEqual(losses['loss_bbox'], 0)
            self.assertGreaterEqual(losses['loss_cls'], 0)
            self.assertGreaterEqual(losses['loss_corner'], 0)
tests/test_models/test_detectors/test_sassd.py  (new file, mode 100644)

import unittest

import torch
from mmengine import DefaultScope

from mmdet3d.registry import MODELS
from mmdet3d.testing import (create_detector_inputs, get_detector_cfg,
                             setup_seed)


class TestSDSSD(unittest.TestCase):

    def test_3dssd(self):
        import mmdet3d.models

        assert hasattr(mmdet3d.models, 'SASSD')
        DefaultScope.get_instance('test_sassd', scope_name='mmdet3d')
        setup_seed(0)
        voxel_net_cfg = get_detector_cfg('sassd/sassd_8xb6-80e_kitti-3d-3class.py')
        model = MODELS.build(voxel_net_cfg)

        num_gt_instance = 3
        packed_inputs = create_detector_inputs(
            num_gt_instance=num_gt_instance, num_classes=1)

        if torch.cuda.is_available():
            model = model.cuda()
            # test simple_test
            with torch.no_grad():
                data = model.data_preprocessor(packed_inputs, True)
                torch.cuda.empty_cache()
                results = model.forward(**data, mode='predict')
            self.assertEqual(len(results), 1)
            self.assertIn('bboxes_3d', results[0].pred_instances_3d)
            self.assertIn('scores_3d', results[0].pred_instances_3d)
            self.assertIn('labels_3d', results[0].pred_instances_3d)

            losses = model.forward(**data, mode='loss')

            self.assertGreaterEqual(losses['loss_dir'][0], 0)
            self.assertGreaterEqual(losses['loss_bbox'][0], 0)
            self.assertGreaterEqual(losses['loss_cls'][0], 0)
            self.assertGreater(losses['aux_loss_cls'][0], 0)
            self.assertGreater(losses['aux_loss_reg'][0], 0)
tests/test_models/test_detectors/test_votenet.py

@@ -4,8 +4,8 @@ import torch
 from mmengine import DefaultScope

 from mmdet3d.registry import MODELS
-from tests.utils.model_utils import (_create_detector_inputs,
-                                     _get_detector_cfg, _setup_seed)
+from mmdet3d.testing import (create_detector_inputs, get_detector_cfg,
+                             setup_seed)


 class TestVotenet(unittest.TestCase):
@@ -15,18 +15,16 @@ class TestVotenet(unittest.TestCase):
         assert hasattr(mmdet3d.models, 'VoteNet')
         DefaultScope.get_instance('test_vote_net', scope_name='mmdet3d')

-        _setup_seed(0)
-        voxel_net_cfg = _get_detector_cfg('votenet/votenet_8xb16_sunrgbd-3d.py')
+        setup_seed(0)
+        voxel_net_cfg = get_detector_cfg('votenet/votenet_8xb16_sunrgbd-3d.py')
         model = MODELS.build(voxel_net_cfg)
         num_gt_instance = 50
-        packed_inputs = _create_detector_inputs(num_gt_instance=num_gt_instance)
+        packed_inputs = create_detector_inputs(num_gt_instance=num_gt_instance)

         # TODO: Support aug test
         # aug_data = [
-        #     _create_detector_inputs(num_gt_instance=num_gt_instance),
-        #     _create_detector_inputs(num_gt_instance=num_gt_instance + 1)
+        #     create_detector_inputs(num_gt_instance=num_gt_instance),
+        #     create_detector_inputs(num_gt_instance=num_gt_instance + 1)
         # ]
         # # test_aug_test
         # metainfo = {
tests/test_models/test_detectors/test_voxelnet.py

@@ -5,8 +5,8 @@ import torch
 from mmengine import DefaultScope

 from mmdet3d.registry import MODELS
-from tests.utils.model_utils import (_create_detector_inputs,
-                                     _get_detector_cfg, _setup_seed)
+from mmdet3d.testing import (create_detector_inputs, get_detector_cfg,
+                             setup_seed)


 class TestVoxelNet(unittest.TestCase):
@@ -16,18 +16,17 @@ class TestVoxelNet(unittest.TestCase):
         assert hasattr(mmdet3d.models, 'VoxelNet')
         DefaultScope.get_instance('test_voxelnet', scope_name='mmdet3d')

-        _setup_seed(0)
-        pointpillars_cfg = _get_detector_cfg(
+        setup_seed(0)
+        pointpillars_cfg = get_detector_cfg(
             'pointpillars/pointpillars_hv_secfpn_8xb6-160e_kitti-3d-3class.py')
         model = MODELS.build(pointpillars_cfg)
         num_gt_instance = 2
-        packed_inputs = _create_detector_inputs(num_gt_instance=num_gt_instance)
+        packed_inputs = create_detector_inputs(num_gt_instance=num_gt_instance)

         # TODO: Support aug_test
         # aug_data = [
-        #     _create_detector_inputs(num_gt_instance=num_gt_instance),
-        #     _create_detector_inputs(num_gt_instance=num_gt_instance + 1)
+        #     create_detector_inputs(num_gt_instance=num_gt_instance),
+        #     create_detector_inputs(num_gt_instance=num_gt_instance + 1)
        # ]
        # # test_aug_test
        # metainfo = {
tests/test_models/test_losses/test_rotated_iou_loss.py  (new file, mode 100644)

# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch

from mmdet3d.models.losses import RotatedIoU3DLoss


def test_rotated_iou_3d_loss():

    if not torch.cuda.is_available():
        return

    boxes1 = torch.tensor([[.5, .5, .5, 1., 1., 1., .0],
                           [.5, .5, .5, 1., 1., 1., .0],
                           [.5, .5, .5, 1., 1., 1., .0],
                           [.5, .5, .5, 1., 1., 1., .0],
                           [.5, .5, .5, 1., 1., 1., .0]]).cuda()
    boxes2 = torch.tensor([[.5, .5, .5, 1., 1., 1., .0],
                           [.5, .5, .5, 1., 1., 2., np.pi / 2],
                           [.5, .5, .5, 1., 1., 1., np.pi / 4],
                           [1., 1., 1., 1., 1., 1., .0],
                           [-1.5, -1.5, -1.5, 2.5, 2.5, 2.5, .0]]).cuda()

    expect_ious = 1 - torch.tensor([[1., .5, .7071, 1 / 15, .0]]).cuda()
    ious = RotatedIoU3DLoss(reduction='none')(boxes1, boxes2)
    assert torch.allclose(ious, expect_ious, atol=1e-4)
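As a sanity check on the expected values, the fourth pair of boxes is axis-aligned, so its IoU can be verified by hand without a GPU. The sketch below is an independent NumPy check (not part of the test file) reproducing the 1/15 figure for two unit cubes centered at (0.5, 0.5, 0.5) and (1, 1, 1):

import numpy as np

def aabb_iou(c1, s1, c2, s2):
    # IoU of two axis-aligned boxes given centers c and sizes s
    lo = np.maximum(c1 - s1 / 2, c2 - s2 / 2)
    hi = np.minimum(c1 + s1 / 2, c2 + s2 / 2)
    inter = np.prod(np.clip(hi - lo, 0, None))
    union = np.prod(s1) + np.prod(s2) - inter
    return inter / union

iou = aabb_iou(np.full(3, .5), np.ones(3), np.ones(3), np.ones(3))
assert np.isclose(iou, 1 / 15)  # intersection 0.5**3, union 1 + 1 - 0.5**3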
tests/test_models/test_necks/test_second_fpn.py

@@ -45,8 +45,8 @@ def test_centerpoint_fpn():
     second_cfg = dict(
         type='SECOND',
-        in_channels=64,
-        out_channels=[64, 128, 256],
+        in_channels=2,
+        out_channels=[2, 2, 2],
         layer_nums=[3, 5, 5],
         layer_strides=[2, 2, 2],
         norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01),
@@ -57,8 +57,8 @@ def test_centerpoint_fpn():
     # centerpoint usage of fpn
     centerpoint_fpn_cfg = dict(
         type='SECONDFPN',
-        in_channels=[64, 128, 256],
-        out_channels=[128, 128, 128],
+        in_channels=[2, 2, 2],
+        out_channels=[2, 2, 2],
         upsample_strides=[0.5, 1, 2],
         norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01),
         upsample_cfg=dict(type='deconv', bias=False),
@@ -67,17 +67,17 @@ def test_centerpoint_fpn():
     # original usage of fpn
     fpn_cfg = dict(
         type='SECONDFPN',
-        in_channels=[64, 128, 256],
+        in_channels=[2, 2, 2],
         upsample_strides=[1, 2, 4],
-        out_channels=[128, 128, 128])
+        out_channels=[2, 2, 2])
     second_fpn = build_neck(fpn_cfg)

     centerpoint_second_fpn = build_neck(centerpoint_fpn_cfg)

-    input = torch.rand([4, 64, 512, 512])
+    input = torch.rand([2, 2, 32, 32])
     sec_output = second(input)
     centerpoint_output = centerpoint_second_fpn(sec_output)
     second_output = second_fpn(sec_output)
-    assert centerpoint_output[0].shape == torch.Size([4, 384, 128, 128])
-    assert second_output[0].shape == torch.Size([4, 384, 256, 256])
+    assert centerpoint_output[0].shape == torch.Size([2, 6, 8, 8])
+    assert second_output[0].shape == torch.Size([2, 6, 16, 16])
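The new assertions can be checked by simple arithmetic, assuming SECOND halves the spatial resolution at each of its three stages (layer_strides=[2, 2, 2]) and SECONDFPN concatenates its three 2-channel outputs:

# independent shape check for the smaller test tensors above
sizes = [32 // 2, 32 // 4, 32 // 8]                              # [16, 8, 4]
centerpoint = [int(s * u) for s, u in zip(sizes, [0.5, 1, 2])]   # [8, 8, 8]
original = [int(s * u) for s, u in zip(sizes, [1, 2, 4])]        # [16, 16, 16]
channels = 3 * 2  # three levels of 2 channels each, concatenated
assert (channels, centerpoint, original) == (6, [8, 8, 8], [16, 16, 16])
# hence centerpoint_output[0].shape == [2, 6, 8, 8]
# and second_output[0].shape == [2, 6, 16, 16]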
tests/test_models/test_task_modules/test_anchor/test_anchor_3d_generator.py

@@ -45,10 +45,10 @@ def test_anchor_3d_range_generator():
         '\nrotations=[0, 1.57],\nreshape_out=False,' \
         '\nsize_per_range=True)'
     assert repr_str == expected_repr_str
-    featmap_size = (256, 256)
+    featmap_size = (8, 8)
     mr_anchors = anchor_generator.single_level_grid_anchors(
         featmap_size, 1.1, device=device)
-    assert mr_anchors.shape == torch.Size([1, 256, 256, 3, 2, 7])
+    assert mr_anchors.shape == torch.Size([1, 8, 8, 3, 2, 7])


 def test_aligned_anchor_generator():
@@ -80,108 +80,108 @@ def test_aligned_anchor_generator():
         size_per_range=False,
         reshape_out=True)
-    featmap_sizes = [(256, 256), (128, 128), (64, 64)]
+    featmap_sizes = [(16, 16), (8, 8), (4, 4)]
     anchor_generator = TASK_UTILS.build(anchor_generator_cfg)
     assert anchor_generator.num_base_anchors == 8

     # check base anchors
     expected_grid_anchors = [
         torch.tensor([...], device=device),
         torch.tensor([...], device=device),
         torch.tensor([...], device=device)
     ]
     # (the three expected anchor tensors are regenerated for the (16, 16),
     # (8, 8) and (4, 4) feature maps; the full numeric rows are not
     # reproduced here)
@@ -189,9 +189,9 @@ def test_aligned_anchor_generator():
     multi_level_anchors = anchor_generator.grid_anchors(
         featmap_sizes, device=device)
     expected_multi_level_shapes = [
-        torch.Size([524288, 9]),
-        torch.Size([131072, 9]),
-        torch.Size([32768, 9])
+        torch.Size([2048, 9]),
+        torch.Size([512, 9]),
+        torch.Size([128, 9])
     ]
     for i, single_level_anchor in enumerate(multi_level_anchors):
         assert single_level_anchor.shape == expected_multi_level_shapes[i]
tests/test_models/test_task_modules/test_voxel/test_voxel_generator.py

@@ -6,17 +6,15 @@ from mmdet3d.models.task_modules.voxel import VoxelGenerator
 def test_voxel_generator():
     np.random.seed(0)
-    voxel_size = [0.5, 0.5, 0.5]
-    point_cloud_range = [0, -40, -3, 70.4, 40, 1]
-    max_num_points = 1000
+    voxel_size = [5, 5, 1]
+    point_cloud_range = [0, 0, 0, 20, 40, 4]
+    max_num_points = 5
     self = VoxelGenerator(voxel_size, point_cloud_range, max_num_points)
-    points = np.random.rand(1000, 4)
+    points = np.random.uniform(0, 4, (20, 3))
     voxels = self.generate(points)
     voxels, coors, num_points_per_voxel = voxels
-    expected_coors = np.array([[7, 81, 1], [6, 81, 0], [7, 80, 1], [6, 81, 1],
-                               [7, 81, 0], [6, 80, 1], [7, 80, 0], [6, 80, 0]])
-    expected_num_points_per_voxel = np.array(
-        [120, 121, 127, 134, 115, 127, 125, 131])
-    assert voxels.shape == (8, 1000, 4)
+    expected_coors = np.array([[2, 0, 0], [3, 0, 0], [0, 0, 0], [1, 0, 0]])
+    expected_num_points_per_voxel = np.array([5, 5, 5, 3])
+    assert voxels.shape == (4, 5, 3)
     assert np.all(coors == expected_coors)
     assert np.all(num_points_per_voxel == expected_num_points_per_voxel)
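The new expected values follow from the generator parameters, assuming coordinates come back in (z, y, x) order as in mmdet3d's VoxelGenerator; a rough check of the implied grid:

import numpy as np

voxel_size = np.array([5, 5, 1])
point_cloud_range = np.array([0, 0, 0, 20, 40, 4])
grid = (point_cloud_range[3:] - point_cloud_range[:3]) / voxel_size
assert grid.tolist() == [4.0, 8.0, 4.0]  # 4 x-bins, 8 y-bins, 4 z-bins
# Points drawn from uniform(0, 4) stay inside the first 5 m bin along x and y,
# so only the four 1 m z-bins can be occupied; with 20 points and
# max_num_points=5, a voxel keeps at most 5 points, which matches the expected
# coordinate and count arrays above.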
tools/create_data.py

@@ -35,9 +35,6 @@ def kitti_data_prep(root_path,
     info_train_path = osp.join(out_dir, f'{info_prefix}_infos_train.pkl')
     info_val_path = osp.join(out_dir, f'{info_prefix}_infos_val.pkl')
     info_trainval_path = osp.join(out_dir, f'{info_prefix}_infos_trainval.pkl')
-    kitti.export_2d_annotation(root_path, info_train_path)
-    kitti.export_2d_annotation(root_path, info_val_path)
-    kitti.export_2d_annotation(root_path, info_trainval_path)
     update_pkl_infos('kitti', out_dir=out_dir, pkl_path=info_train_path)
     update_pkl_infos('kitti', out_dir=out_dir, pkl_path=info_val_path)
     update_pkl_infos('kitti', out_dir=out_dir, pkl_path=info_trainval_path)
@@ -76,17 +73,11 @@ def nuscenes_data_prep(root_path,
     if version == 'v1.0-test':
         info_test_path = osp.join(out_dir, f'{info_prefix}_infos_test.pkl')
-        nuscenes_converter.export_2d_annotation(
-            root_path, info_test_path, version=version)
         update_pkl_infos('nuscenes', out_dir=out_dir, pkl_path=info_test_path)
         return

     info_train_path = osp.join(out_dir, f'{info_prefix}_infos_train.pkl')
     info_val_path = osp.join(out_dir, f'{info_prefix}_infos_val.pkl')
-    nuscenes_converter.export_2d_annotation(
-        root_path, info_train_path, version=version)
-    nuscenes_converter.export_2d_annotation(
-        root_path, info_val_path, version=version)
     update_pkl_infos('nuscenes', out_dir=out_dir, pkl_path=info_train_path)
     update_pkl_infos('nuscenes', out_dir=out_dir, pkl_path=info_val_path)
     create_groundtruth_database(dataset_name, root_path, info_prefix,
@@ -265,6 +256,11 @@ args = parser.parse_args()
 if __name__ == '__main__':
     from mmdet3d.utils import register_all_modules
     register_all_modules()
+    # Set to spawn mode to avoid stuck when process dataset creating
+    import multiprocessing
+    multiprocessing.set_start_method('spawn')

     if args.dataset == 'kitti':
         kitti_data_prep(
             root_path=args.root_path,
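A guarded variant of the new spawn-mode setup (a sketch, not the change itself) avoids the RuntimeError that `set_start_method` raises if a start method was already fixed by the launcher:

import multiprocessing

if __name__ == '__main__':
    try:
        # 'spawn' keeps dataset-creation workers from inheriting state that
        # can deadlock fork-based worker pools
        multiprocessing.set_start_method('spawn')
    except RuntimeError:
        pass  # start method already set elsewhere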
tools/dataset_converters/create_gt_database.py

@@ -6,13 +6,13 @@ import mmcv
 import mmengine
 import numpy as np
 from mmcv.ops import roi_align
+from mmdet.evaluation import bbox_overlaps
 from mmengine import track_iter_progress
 from pycocotools import mask as maskUtils
 from pycocotools.coco import COCO

 from mmdet3d.datasets import build_dataset
 from mmdet3d.structures.ops import box_np_ops as box_np_ops
-from mmdet.evaluation import bbox_overlaps


 def _poly2mask(mask_ann, img_h, img_w):
@@ -243,7 +243,7 @@ def create_groundtruth_database(dataset_class_name,
         image_idx = example['sample_idx']
         points = example['points'].tensor.numpy()
         gt_boxes_3d = annos['gt_bboxes_3d'].tensor.numpy()
-        names = [dataset.metainfo['CLASSES'][i] for i in annos['gt_labels_3d']]
+        names = [dataset.metainfo['classes'][i] for i in annos['gt_labels_3d']]
         group_dict = dict()
         if 'group_ids' in annos:
             group_ids = annos['group_ids']
@@ -409,7 +409,7 @@ class GTDatabaseCreater:
             points = example['points'].tensor.numpy()
             gt_boxes_3d = annos['gt_bboxes_3d'].tensor.numpy()
             names = [
-                self.dataset.metainfo['CLASSES'][i]
+                self.dataset.metainfo['classes'][i]
                 for i in annos['gt_labels_3d']
             ]
             group_dict = dict()
             if 'group_ids' in annos:
tools/dataset_converters/lyft_converter.py

@@ -131,6 +131,7 @@ def _fill_trainval_infos(lyft,
         info = {
             'lidar_path': lidar_path,
+            'num_features': 5,
             'token': sample['token'],
             'sweeps': [],
             'cams': dict(),
tools/dataset_converters/s3dis_data_utils.py

@@ -129,7 +129,7 @@ class S3DISData(object):
             - gt_num (int): Number of boxes.
         """
         bboxes, labels = [], []
-        for i in range(1, pts_instance_mask.max()):
+        for i in range(1, pts_instance_mask.max() + 1):
             ids = pts_instance_mask == i
             # check if all instance points have same semantic label
             assert pts_semantic_mask[ids].min() == pts_semantic_mask[ids].max()
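The `+ 1` matters because instance ids start at 1 and `range()` excludes its upper bound, so the old loop silently skipped the last instance; a minimal illustration:

import numpy as np

pts_instance_mask = np.array([1, 1, 2, 2, 3])
old_ids = list(range(1, pts_instance_mask.max()))       # [1, 2]  (misses id 3)
new_ids = list(range(1, pts_instance_mask.max() + 1))   # [1, 2, 3]
assert new_ids[-1] == pts_instance_mask.max()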
tools/dataset_converters/update_infos_to_v2.py

@@ -4,7 +4,7 @@
 Example:
     python tools/dataset_converters/update_infos_to_v2.py
         --dataset kitti
-        --pkl ./data/kitti/kitti_infos_train.pkl
+        --pkl-path ./data/kitti/kitti_infos_train.pkl
         --out-dir ./kitti_v2/
 """
@@ -12,6 +12,7 @@ import argparse
 import copy
 import time
 from os import path as osp
+from pathlib import Path

 import mmengine
 import numpy as np
@@ -80,9 +81,6 @@ def get_empty_lidar_points():
         num_pts_feats=None,
         # (str, optional): Path of LiDAR data file.
         lidar_path=None,
-        # (list[list[float]]): Transformation matrix from lidar
-        # or depth to image with shape [4, 4].
-        lidar2img=None,
         # (list[list[float]], optional): Transformation matrix
         # from lidar to ego-vehicle
         # with shape [4, 4].
@@ -120,6 +118,9 @@ def get_empty_img_info():
         # matrix from camera to image with
         # shape [3, 3], [3, 4] or [4, 4].
         cam2img=None,
+        # (list[list[float]]): Transformation matrix from lidar
+        # or depth to image with shape [4, 4].
+        lidar2img=None,
         # (list[list[float]], optional) : Transformation
         # matrix from camera to ego-vehicle
         # with shape [4, 4].
@@ -159,7 +160,7 @@ def get_empty_standard_data_info(
     data_info = dict(
         # (str): Sample id of the frame.
-        sample_id=None,
+        sample_idx=None,
         # (str, optional): '000010'
         token=None,
         **get_single_image_sweep(camera_types),
@@ -261,13 +262,9 @@ def update_nuscenes_infos(pkl_path, out_dir):
     print(f'Reading from input file: {pkl_path}.')
     data_list = mmengine.load(pkl_path)
     METAINFO = {
-        'CLASSES':
+        'classes':
         ('car', 'truck', 'trailer', 'bus', 'construction_vehicle', 'bicycle',
          'motorcycle', 'pedestrian', 'traffic_cone', 'barrier'),
-        'DATASET': 'Nuscenes',
-        'version': data_list['metadata']['version']
     }
     nusc = NuScenes(
         version=data_list['metadata']['version'],
@@ -287,8 +284,8 @@ def update_nuscenes_infos(pkl_path, out_dir):
             ori_info_dict['ego2global_translation'])
         temp_data_info['lidar_points']['num_pts_feats'] = ori_info_dict.get(
             'num_features', 5)
-        temp_data_info['lidar_points']['lidar_path'] = ori_info_dict[
-            'lidar_path'].split('/')[-1]
+        temp_data_info['lidar_points']['lidar_path'] = Path(
+            ori_info_dict['lidar_path']).name
         temp_data_info['lidar_points'][
             'lidar2ego'] = convert_quaternion_to_matrix(
                 ori_info_dict['lidar2ego_rotation'],
@@ -318,8 +315,8 @@ def update_nuscenes_infos(pkl_path, out_dir):
         temp_data_info['images'] = {}
         for cam in ori_info_dict['cams']:
             empty_img_info = get_empty_img_info()
-            empty_img_info['img_path'] = ori_info_dict['cams'][cam][
-                'data_path'].split('/')[-1]
+            empty_img_info['img_path'] = Path(
+                ori_info_dict['cams'][cam]['data_path']).name
             empty_img_info['cam2img'] = ori_info_dict['cams'][cam][
                 'cam_intrinsic'].tolist()
             empty_img_info['sample_data_token'] = ori_info_dict['cams'][cam][
@@ -344,8 +341,8 @@ def update_nuscenes_infos(pkl_path, out_dir):
             empty_instance = get_empty_instance()
             empty_instance['bbox_3d'] = ori_info_dict['gt_boxes'][i, :].tolist()
-            if ori_info_dict['gt_names'][i] in METAINFO['CLASSES']:
-                empty_instance['bbox_label'] = METAINFO['CLASSES'].index(
+            if ori_info_dict['gt_names'][i] in METAINFO['classes']:
+                empty_instance['bbox_label'] = METAINFO['classes'].index(
                     ori_info_dict['gt_names'][i])
             else:
                 ignore_class_name.add(ori_info_dict['gt_names'][i])
@@ -363,11 +360,20 @@ def update_nuscenes_infos(pkl_path, out_dir):
                 ori_info_dict, nusc)
         temp_data_info, _ = clear_data_info_unused_keys(temp_data_info)
         converted_list.append(temp_data_info)
-    pkl_name = pkl_path.split('/')[-1]
+    pkl_name = Path(pkl_path).name
     out_path = osp.join(out_dir, pkl_name)
     print(f'Writing to output file: {out_path}.')
     print(f'ignore classes: {ignore_class_name}')
-    converted_data_info = dict(metainfo=METAINFO, data_list=converted_list)
+
+    metainfo = dict()
+    metainfo['categories'] = {k: i for i, k in enumerate(METAINFO['classes'])}
+    if ignore_class_name:
+        for ignore_class in ignore_class_name:
+            metainfo['categories'][ignore_class] = -1
+    metainfo['dataset'] = 'nuscenes'
+    metainfo['version'] = data_list['metadata']['version']
+    metainfo['info_version'] = '1.1'
+    converted_data_info = dict(metainfo=metainfo, data_list=converted_list)
+
     mmengine.dump(converted_data_info, out_path, 'pkl')
@@ -381,7 +387,7 @@ def update_kitti_infos(pkl_path, out_dir):
     # TODO update to full label
     # TODO discuss how to process 'Van', 'DontCare'
     METAINFO = {
-        'CLASSES': ('Pedestrian', 'Cyclist', 'Car', 'Van', 'Truck',
-                    'Person_sitting', 'Tram', 'Misc'),
+        'classes': ('Pedestrian', 'Cyclist', 'Car', 'Van', 'Truck',
+                    'Person_sitting', 'Tram', 'Misc'),
     }
     print(f'Reading from input file: {pkl_path}.')
@@ -405,15 +411,15 @@ def update_kitti_infos(pkl_path, out_dir):
         temp_data_info['images']['CAM3']['cam2img'] = ori_info_dict['calib'][
             'P3'].tolist()
-        temp_data_info['images']['CAM2']['img_path'] = ori_info_dict['image'][
-            'image_path'].split('/')[-1]
+        temp_data_info['images']['CAM2']['img_path'] = Path(
+            ori_info_dict['image']['image_path']).name
         h, w = ori_info_dict['image']['image_shape']
         temp_data_info['images']['CAM2']['height'] = h
         temp_data_info['images']['CAM2']['width'] = w
         temp_data_info['lidar_points']['num_pts_feats'] = ori_info_dict[
             'point_cloud']['num_features']
-        temp_data_info['lidar_points']['lidar_path'] = ori_info_dict[
-            'point_cloud']['velodyne_path'].split('/')[-1]
+        temp_data_info['lidar_points']['lidar_path'] = Path(
+            ori_info_dict['point_cloud']['velodyne_path']).name
         rect = ori_info_dict['calib']['R0_rect'].astype(np.float32)
         Trv2c = ori_info_dict['calib']['Tr_velo_to_cam'].astype(np.float32)
@@ -446,8 +452,8 @@ def update_kitti_infos(pkl_path, out_dir):
             empty_instance = get_empty_instance()
             empty_instance['bbox'] = anns['bbox'][instance_id].tolist()
-            if anns['name'][instance_id] in METAINFO['CLASSES']:
-                empty_instance['bbox_label'] = METAINFO['CLASSES'].index(
+            if anns['name'][instance_id] in METAINFO['classes']:
+                empty_instance['bbox_label'] = METAINFO['classes'].index(
                     anns['name'][instance_id])
             else:
                 ignore_class_name.add(anns['name'][instance_id])
@@ -493,12 +499,20 @@ def update_kitti_infos(pkl_path, out_dir):
         temp_data_info['cam_instances'] = cam_instances
         temp_data_info, _ = clear_data_info_unused_keys(temp_data_info)
         converted_list.append(temp_data_info)
-    pkl_name = pkl_path.split('/')[-1]
+    pkl_name = Path(pkl_path).name
     out_path = osp.join(out_dir, pkl_name)
     print(f'Writing to output file: {out_path}.')
     print(f'ignore classes: {ignore_class_name}')
-    converted_data_info = dict(
-        metainfo={'DATASET': 'KITTI'}, data_list=converted_list)
+
+    # dataset metainfo
+    metainfo = dict()
+    metainfo['categories'] = {k: i for i, k in enumerate(METAINFO['classes'])}
+    if ignore_class_name:
+        for ignore_class in ignore_class_name:
+            metainfo['categories'][ignore_class] = -1
+    metainfo['dataset'] = 'kitti'
+    metainfo['info_version'] = '1.1'
+    converted_data_info = dict(metainfo=metainfo, data_list=converted_list)
+
     mmengine.dump(converted_data_info, out_path, 'pkl')
@@ -509,7 +523,7 @@ def update_s3dis_infos(pkl_path, out_dir):
     print(f'Warning, you may overwriting '
           f'the original data {pkl_path}.')
     time.sleep(5)
-    METAINFO = {'CLASSES': ('table', 'chair', 'sofa', 'bookcase', 'board')}
+    METAINFO = {'classes': ('table', 'chair', 'sofa', 'bookcase', 'board')}
     print(f'Reading from input file: {pkl_path}.')
     data_list = mmengine.load(pkl_path)
     print('Start updating:')
@@ -519,12 +533,12 @@ def update_s3dis_infos(pkl_path, out_dir):
         temp_data_info['sample_idx'] = i
         temp_data_info['lidar_points']['num_pts_feats'] = ori_info_dict[
             'point_cloud']['num_features']
-        temp_data_info['lidar_points']['lidar_path'] = ori_info_dict[
-            'pts_path'].split('/')[-1]
-        temp_data_info['pts_semantic_mask_path'] = ori_info_dict[
-            'pts_semantic_mask_path'].split('/')[-1]
-        temp_data_info['pts_instance_mask_path'] = ori_info_dict[
-            'pts_instance_mask_path'].split('/')[-1]
+        temp_data_info['lidar_points']['lidar_path'] = Path(
+            ori_info_dict['pts_path']).name
+        temp_data_info['pts_semantic_mask_path'] = Path(
+            ori_info_dict['pts_semantic_mask_path']).name
+        temp_data_info['pts_instance_mask_path'] = Path(
+            ori_info_dict['pts_instance_mask_path']).name

         # TODO support camera
         # np.linalg.inv(info['axis_align_matrix'] @ extrinsic): depth2cam
@@ -541,12 +555,12 @@ def update_s3dis_infos(pkl_path, out_dir):
             empty_instance['bbox_3d'] = anns['gt_boxes_upright_depth'][
                 instance_id].tolist()
-            if anns['class'][instance_id] < len(METAINFO['CLASSES']):
+            if anns['class'][instance_id] < len(METAINFO['classes']):
                 empty_instance['bbox_label_3d'] = anns['class'][instance_id]
             else:
                 ignore_class_name.add(
-                    METAINFO['CLASSES'][anns['class'][instance_id]])
+                    METAINFO['classes'][anns['class'][instance_id]])
                 empty_instance['bbox_label_3d'] = -1
             empty_instance = clear_instance_unused_keys(empty_instance)
@@ -554,12 +568,21 @@ def update_s3dis_infos(pkl_path, out_dir):
         temp_data_info['instances'] = instance_list
         temp_data_info, _ = clear_data_info_unused_keys(temp_data_info)
         converted_list.append(temp_data_info)
-    pkl_name = pkl_path.split('/')[-1]
+    pkl_name = Path(pkl_path).name
     out_path = osp.join(out_dir, pkl_name)
     print(f'Writing to output file: {out_path}.')
     print(f'ignore classes: {ignore_class_name}')
-    converted_data_info = dict(
-        metainfo={'DATASET': 'S3DIS'}, data_list=converted_list)
+
+    # dataset metainfo
+    metainfo = dict()
+    metainfo['categories'] = {k: i for i, k in enumerate(METAINFO['classes'])}
+    if ignore_class_name:
+        for ignore_class in ignore_class_name:
+            metainfo['categories'][ignore_class] = -1
+    metainfo['dataset'] = 's3dis'
+    metainfo['info_version'] = '1.1'
+    converted_data_info = dict(metainfo=metainfo, data_list=converted_list)
+
     mmengine.dump(converted_data_info, out_path, 'pkl')
@@ -571,7 +594,7 @@ def update_scannet_infos(pkl_path, out_dir):
           f'the original data {pkl_path}.')
     time.sleep(5)
     METAINFO = {
-        'CLASSES':
+        'classes':
         ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window',
          'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator',
          'showercurtrain', 'toilet', 'sink', 'bathtub', 'garbagebin')
@@ -584,12 +607,12 @@ def update_scannet_infos(pkl_path, out_dir):
         temp_data_info = get_empty_standard_data_info()
         temp_data_info['lidar_points']['num_pts_feats'] = ori_info_dict[
             'point_cloud']['num_features']
-        temp_data_info['lidar_points']['lidar_path'] = ori_info_dict[
-            'pts_path'].split('/')[-1]
-        temp_data_info['pts_semantic_mask_path'] = ori_info_dict[
-            'pts_semantic_mask_path'].split('/')[-1]
-        temp_data_info['pts_instance_mask_path'] = ori_info_dict[
-            'pts_instance_mask_path'].split('/')[-1]
+        temp_data_info['lidar_points']['lidar_path'] = Path(
+            ori_info_dict['pts_path']).name
+        temp_data_info['pts_semantic_mask_path'] = Path(
+            ori_info_dict['pts_semantic_mask_path']).name
+        temp_data_info['pts_instance_mask_path'] = Path(
+            ori_info_dict['pts_instance_mask_path']).name

         # TODO support camera
         # np.linalg.inv(info['axis_align_matrix'] @ extrinsic): depth2cam
@@ -607,9 +630,9 @@ def update_scannet_infos(pkl_path, out_dir):
             empty_instance['bbox_3d'] = anns['gt_boxes_upright_depth'][
                 instance_id].tolist()
-            if anns['name'][instance_id] in METAINFO['CLASSES']:
+            if anns['name'][instance_id] in METAINFO['classes']:
                 empty_instance['bbox_label_3d'] = METAINFO[
-                    'CLASSES'].index(anns['name'][instance_id])
+                    'classes'].index(anns['name'][instance_id])
             else:
                 ignore_class_name.add(anns['name'][instance_id])
                 empty_instance['bbox_label_3d'] = -1
@@ -619,12 +642,21 @@ def update_scannet_infos(pkl_path, out_dir):
         temp_data_info['instances'] = instance_list
         temp_data_info, _ = clear_data_info_unused_keys(temp_data_info)
         converted_list.append(temp_data_info)
-    pkl_name = pkl_path.split('/')[-1]
+    pkl_name = Path(pkl_path).name
     out_path = osp.join(out_dir, pkl_name)
     print(f'Writing to output file: {out_path}.')
     print(f'ignore classes: {ignore_class_name}')
-    converted_data_info = dict(
-        metainfo={'DATASET': 'SCANNET'}, data_list=converted_list)
+
+    # dataset metainfo
+    metainfo = dict()
+    metainfo['categories'] = {k: i for i, k in enumerate(METAINFO['classes'])}
+    if ignore_class_name:
+        for ignore_class in ignore_class_name:
+            metainfo['categories'][ignore_class] = -1
+    metainfo['dataset'] = 'scannet'
+    metainfo['info_version'] = '1.1'
+    converted_data_info = dict(metainfo=metainfo, data_list=converted_list)
+
     mmengine.dump(converted_data_info, out_path, 'pkl')
@@ -636,7 +668,7 @@ def update_sunrgbd_infos(pkl_path, out_dir):
          f'the original data {pkl_path}.')
     time.sleep(5)
     METAINFO = {
-        'CLASSES': ('bed', 'table', 'sofa', 'chair', 'toilet', 'desk',
-                    'dresser', 'night_stand', 'bookshelf', 'bathtub')
+        'classes': ('bed', 'table', 'sofa', 'chair', 'toilet', 'desk',
+                    'dresser', 'night_stand', 'bookshelf', 'bathtub')
     }
     print(f'Reading from input file: {pkl_path}.')
@@ -647,8 +679,8 @@ def update_sunrgbd_infos(pkl_path, out_dir):
         temp_data_info = get_empty_standard_data_info()
         temp_data_info['lidar_points']['num_pts_feats'] = ori_info_dict[
             'point_cloud']['num_features']
-        temp_data_info['lidar_points']['lidar_path'] = ori_info_dict[
-            'pts_path'].split('/')[-1]
+        temp_data_info['lidar_points']['lidar_path'] = Path(
+            ori_info_dict['pts_path']).name
         calib = ori_info_dict['calib']
         rt_mat = calib['Rt']
         # follow Coord3DMode.convert_point
@@ -656,8 +688,8 @@ def update_sunrgbd_infos(pkl_path, out_dir):
                            ]) @ rt_mat.transpose(1, 0)
         depth2img = calib['K'] @ rt_mat
         temp_data_info['images']['CAM0']['depth2img'] = depth2img.tolist()
-        temp_data_info['images']['CAM0']['img_path'] = ori_info_dict['image'][
-            'image_path'].split('/')[-1]
+        temp_data_info['images']['CAM0']['img_path'] = Path(
+            ori_info_dict['image']['image_path']).name
         h, w = ori_info_dict['image']['image_shape']
         temp_data_info['images']['CAM0']['height'] = h
         temp_data_info['images']['CAM0']['width'] = w
@@ -674,9 +706,9 @@ def update_sunrgbd_infos(pkl_path, out_dir):
             empty_instance['bbox_3d'] = anns['gt_boxes_upright_depth'][
                 instance_id].tolist()
             empty_instance['bbox'] = anns['bbox'][instance_id].tolist()
-            if anns['name'][instance_id] in METAINFO['CLASSES']:
+            if anns['name'][instance_id] in METAINFO['classes']:
                 empty_instance['bbox_label_3d'] = METAINFO[
-                    'CLASSES'].index(anns['name'][instance_id])
+                    'classes'].index(anns['name'][instance_id])
                 empty_instance['bbox_label'] = empty_instance['bbox_label_3d']
             else:
@@ -688,12 +720,21 @@ def update_sunrgbd_infos(pkl_path, out_dir):
         temp_data_info['instances'] = instance_list
         temp_data_info, _ = clear_data_info_unused_keys(temp_data_info)
         converted_list.append(temp_data_info)
-    pkl_name = pkl_path.split('/')[-1]
+    pkl_name = Path(pkl_path).name
     out_path = osp.join(out_dir, pkl_name)
     print(f'Writing to output file: {out_path}.')
     print(f'ignore classes: {ignore_class_name}')
-    converted_data_info = dict(
-        metainfo={'DATASET': 'SUNRGBD'}, data_list=converted_list)
+
+    # dataset metainfo
+    metainfo = dict()
+    metainfo['categories'] = {k: i for i, k in enumerate(METAINFO['classes'])}
+    if ignore_class_name:
+        for ignore_class in ignore_class_name:
+            metainfo['categories'][ignore_class] = -1
+    metainfo['dataset'] = 'sunrgbd'
+    metainfo['info_version'] = '1.1'
+    converted_data_info = dict(metainfo=metainfo, data_list=converted_list)
+
     mmengine.dump(converted_data_info, out_path, 'pkl')
@@ -706,13 +747,9 @@ def update_lyft_infos(pkl_path, out_dir):
     print(f'Reading from input file: {pkl_path}.')
     data_list = mmengine.load(pkl_path)
     METAINFO = {
-        'CLASSES':
+        'classes':
         ('car', 'truck', 'bus', 'emergency_vehicle', 'other_vehicle',
          'motorcycle', 'bicycle', 'pedestrian', 'animal'),
-        'DATASET': 'Nuscenes',
-        'version': data_list['metadata']['version']
     }
     print('Start updating:')
     converted_list = []
@@ -724,8 +761,10 @@ def update_lyft_infos(pkl_path, out_dir):
         temp_data_info['ego2global'] = convert_quaternion_to_matrix(
             ori_info_dict['ego2global_rotation'],
             ori_info_dict['ego2global_translation'])
-        temp_data_info['lidar_points']['lidar_path'] = ori_info_dict[
-            'lidar_path'].split('/')[-1]
+        temp_data_info['lidar_points']['num_pts_feats'] = ori_info_dict.get(
+            'num_features', 5)
+        temp_data_info['lidar_points']['lidar_path'] = Path(
+            ori_info_dict['lidar_path']).name
         temp_data_info['lidar_points'][
             'lidar2ego'] = convert_quaternion_to_matrix(
                 ori_info_dict['lidar2ego_rotation'],
@@ -756,8 +795,8 @@ def update_lyft_infos(pkl_path, out_dir):
         temp_data_info['images'] = {}
         for cam in ori_info_dict['cams']:
             empty_img_info = get_empty_img_info()
-            empty_img_info['img_path'] = ori_info_dict['cams'][cam][
-                'data_path'].split('/')[-1]
+            empty_img_info['img_path'] = Path(
+                ori_info_dict['cams'][cam]['data_path']).name
             empty_img_info['cam2img'] = ori_info_dict['cams'][cam][
                 'cam_intrinsic'].tolist()
             empty_img_info['sample_data_token'] = ori_info_dict['cams'][cam][
@@ -781,8 +820,8 @@ def update_lyft_infos(pkl_path, out_dir):
             empty_instance = get_empty_instance()
             empty_instance['bbox_3d'] = ori_info_dict['gt_boxes'][i, :].tolist()
-            if ori_info_dict['gt_names'][i] in METAINFO['CLASSES']:
-                empty_instance['bbox_label'] = METAINFO['CLASSES'].index(
+            if ori_info_dict['gt_names'][i] in METAINFO['classes']:
+                empty_instance['bbox_label'] = METAINFO['classes'].index(
                     ori_info_dict['gt_names'][i])
             else:
                 ignore_class_name.add(ori_info_dict['gt_names'][i])
@@ -793,11 +832,20 @@ def update_lyft_infos(pkl_path, out_dir):
             temp_data_info['instances'].append(empty_instance)
         temp_data_info, _ = clear_data_info_unused_keys(temp_data_info)
         converted_list.append(temp_data_info)
-    pkl_name = pkl_path.split('/')[-1]
+    pkl_name = Path(pkl_path).name
     out_path = osp.join(out_dir, pkl_name)
     print(f'Writing to output file: {out_path}.')
     print(f'ignore classes: {ignore_class_name}')
-    converted_data_info = dict(metainfo=METAINFO, data_list=converted_list)
+
+    metainfo = dict()
+    metainfo['categories'] = {k: i for i, k in enumerate(METAINFO['classes'])}
+    if ignore_class_name:
+        for ignore_class in ignore_class_name:
+            metainfo['categories'][ignore_class] = -1
+    metainfo['dataset'] = 'lyft'
+    metainfo['version'] = data_list['metadata']['version']
+    metainfo['info_version'] = '1.1'
+    converted_data_info = dict(metainfo=metainfo, data_list=converted_list)
+
     mmengine.dump(converted_data_info, out_path, 'pkl')
@@ -820,7 +868,7 @@ def update_waymo_infos(pkl_path, out_dir):
     # TODO update to full label
     # TODO discuss how to process 'Van', 'DontCare'
     METAINFO = {
-        'CLASSES': ('Car', 'Pedestrian', 'Cyclist', 'Sign'),
+        'classes': ('Car', 'Pedestrian', 'Cyclist', 'Sign'),
     }
     print(f'Reading from input file: {pkl_path}.')
     data_list = mmengine.load(pkl_path)
@@ -851,7 +899,7 @@ def update_waymo_infos(pkl_path, out_dir):
                 ori_info_dict['calib'][f'P{cam_idx}'] @ lidar2cam).tolist()

         # image path
-        base_img_path = ori_info_dict['image']['image_path'].split('/')[-1]
+        base_img_path = Path(ori_info_dict['image']['image_path']).name

         for cam_idx, cam_key in enumerate(camera_types):
             temp_data_info['images'][cam_key]['timestamp'] = ori_info_dict[
@@ -867,8 +915,8 @@ def update_waymo_infos(pkl_path, out_dir):
             'point_cloud']['num_features']
         temp_data_info['lidar_points']['timestamp'] = ori_info_dict['timestamp']
-        temp_data_info['lidar_points']['lidar_path'] = ori_info_dict[
-            'point_cloud']['velodyne_path'].split('/')[-1]
+        temp_data_info['lidar_points']['lidar_path'] = Path(
+            ori_info_dict['point_cloud']['velodyne_path']).name
         # TODO discuss the usage of Tr_velo_to_cam in lidar
         Trv2c = ori_info_dict['calib']['Tr_velo_to_cam'].astype(np.float32)
@@ -888,13 +936,13 @@ def update_waymo_infos(pkl_path, out_dir):
             lidar_sweep = get_single_lidar_sweep()
             lidar_sweep['ego2global'] = ori_sweep['pose']
             lidar_sweep['timestamp'] = ori_sweep['timestamp']
-            lidar_sweep['lidar_points']['lidar_path'] = ori_sweep[
-                'velodyne_path'].split('/')[-1]
+            lidar_sweep['lidar_points']['lidar_path'] = Path(
+                ori_sweep['velodyne_path']).name
             # image sweeps
             image_sweep = get_single_image_sweep(camera_types)
             image_sweep['ego2global'] = ori_sweep['pose']
             image_sweep['timestamp'] = ori_sweep['timestamp']
-            img_path = ori_sweep['image_path'].split('/')[-1]
+            img_path = Path(ori_sweep['image_path']).name
             for cam_idx, cam_key in enumerate(camera_types):
                 image_sweep['images'][cam_key]['img_path'] = img_path
@@ -910,8 +958,8 @@ def update_waymo_infos(pkl_path, out_dir):
             empty_instance = get_empty_instance()
             empty_instance['bbox'] = anns['bbox'][instance_id].tolist()
-            if anns['name'][instance_id] in METAINFO['CLASSES']:
-                empty_instance['bbox_label'] = METAINFO['CLASSES'].index(
+            if anns['name'][instance_id] in METAINFO['classes']:
+                empty_instance['bbox_label'] = METAINFO['classes'].index(
                     anns['name'][instance_id])
             else:
                 ignore_class_name.add(anns['name'][instance_id])
@@ -954,8 +1002,8 @@ def update_waymo_infos(pkl_path, out_dir):
             empty_instance = get_empty_instance()
             empty_instance['bbox'] = anns['bbox'][instance_id].tolist()
-            if anns['name'][instance_id] in METAINFO['CLASSES']:
-                empty_instance['bbox_label'] = METAINFO['CLASSES'].index(
+            if anns['name'][instance_id] in METAINFO['classes']:
+                empty_instance['bbox_label'] = METAINFO['classes'].index(
                     anns['name'][instance_id])
             else:
                 ignore_class_name.add(anns['name'][instance_id])
@@ -991,12 +1039,22 @@ def update_waymo_infos(pkl_path, out_dir):
         temp_data_info, _ = clear_data_info_unused_keys(temp_data_info)
         converted_list.append(temp_data_info)
-    pkl_name = pkl_path.split('/')[-1]
+    pkl_name = Path(pkl_path).name
     out_path = osp.join(out_dir, pkl_name)
     print(f'Writing to output file: {out_path}.')
     print(f'ignore classes: {ignore_class_name}')
-    converted_data_info = dict(
-        metainfo={'DATASET': 'Waymo'}, data_list=converted_list)
+
+    # dataset metainfo
+    metainfo = dict()
+    metainfo['categories'] = {k: i for i, k in enumerate(METAINFO['classes'])}
+    if ignore_class_name:
+        for ignore_class in ignore_class_name:
+            metainfo['categories'][ignore_class] = -1
+    metainfo['dataset'] = 'waymo'
+    metainfo['version'] = '1.2'
+    metainfo['info_version'] = '1.1'
+    converted_data_info = dict(metainfo=metainfo, data_list=converted_list)
+
     mmengine.dump(converted_data_info, out_path, 'pkl')
@@ -1073,4 +1131,4 @@ if __name__ == '__main__':
     if args.out_dir is None:
         args.out_dir = args.root_dir
     update_pkl_infos(
-        dataset=args.dataset, out_dir=args.out_dir, pkl_path=args.pkl)
+        dataset=args.dataset, out_dir=args.out_dir, pkl_path=args.pkl_path)
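Two patterns recur throughout this update: `Path(...).name` replaces `.split('/')[-1]` for extracting file names, and every `update_*_infos` function now emits a uniform `metainfo` block whose `categories` maps class names to label indices, with ignored classes pinned to -1. A small standalone illustration (the ignored class name here is only an example):

from pathlib import Path

assert Path('./data/kitti/kitti_infos_train.pkl').name == 'kitti_infos_train.pkl'

classes = ('Pedestrian', 'Cyclist', 'Car')
ignore_class_name = {'DontCare'}  # any name outside METAINFO['classes']
categories = {k: i for i, k in enumerate(classes)}
for ignore_class in ignore_class_name:
    categories[ignore_class] = -1
assert categories == {'Pedestrian': 0, 'Cyclist': 1, 'Car': 2, 'DontCare': -1}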
tools/dataset_converters/waymo_converter.py

@@ -12,7 +12,6 @@ except ImportError:
 from glob import glob
 from os.path import join

-import mmcv
 import mmengine
 import numpy as np
 import tensorflow as tf
@@ -70,10 +69,7 @@ class Waymo2KITTI(object):
             '_SIDE_LEFT',
             '_SIDE_RIGHT',
         ]
-        self.lidar_list = [
-            '_FRONT', '_FRONT_RIGHT', '_FRONT_LEFT', '_SIDE_RIGHT',
-            '_SIDE_LEFT'
-        ]
+        self.lidar_list = ['TOP', 'FRONT', 'SIDE_LEFT', 'SIDE_RIGHT', 'REAR']
         self.type_list = [
             'UNKNOWN', 'VEHICLE', 'PEDESTRIAN', 'SIGN', 'CYCLIST'
         ]
@@ -136,7 +132,9 @@ class Waymo2KITTI(object):
             self.save_image(frame, file_idx, frame_idx)
             self.save_calib(frame, file_idx, frame_idx)
-            self.save_lidar(frame, file_idx, frame_idx)
+            if 'testing_3d_camera_only_detection' not in self.load_dir:
+                # the camera only split doesn't contain lidar points.
+                self.save_lidar(frame, file_idx, frame_idx)
             self.save_pose(frame, file_idx, frame_idx)
             self.save_timestamp(frame, file_idx, frame_idx)
@@ -162,8 +160,8 @@ class Waymo2KITTI(object):
             img_path = f'{self.image_save_dir}{str(img.name - 1)}/' + \
                 f'{self.prefix}{str(file_idx).zfill(3)}' + \
                 f'{str(frame_idx).zfill(3)}.jpg'
-            img = mmcv.imfrombytes(img.image)
-            mmcv.imwrite(img, img_path)
+            with open(img_path, 'wb') as fp:
+                fp.write(img.image)

     def save_calib(self, frame, file_idx, frame_idx):
         """Parse and save the calibration data.
@@ -442,7 +440,6 @@ class Waymo2KITTI(object):
             dir_list1 = [
                 self.label_all_save_dir,
                 self.calib_save_dir,
-                self.point_cloud_save_dir,
                 self.pose_save_dir,
                 self.timestamp_save_dir,
             ]
@@ -452,10 +449,12 @@ class Waymo2KITTI(object):
                 dir_list2.append(self.cam_sync_label_save_dir)
         else:
             dir_list1 = [
-                self.calib_save_dir, self.point_cloud_save_dir,
-                self.pose_save_dir, self.timestamp_save_dir
+                self.calib_save_dir, self.pose_save_dir,
+                self.timestamp_save_dir
             ]
             dir_list2 = [self.image_save_dir]
+        if 'testing_3d_camera_only_detection' not in self.load_dir:
+            dir_list1.append(self.point_cloud_save_dir)
         for d in dir_list1:
             mmengine.mkdir_or_exist(d)
         for d in dir_list2:
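The image-saving change above drops the decode/re-encode round trip: Waymo frames already carry JPEG-encoded bytes, so they can be written to disk directly. A minimal sketch of the idea, assuming `encoded_bytes` holds the JPEG payload the way `img.image` does in the converter:

def save_encoded_image(encoded_bytes: bytes, img_path: str) -> None:
    # write the already-encoded JPEG as-is: no decode/encode, bit-exact output
    with open(img_path, 'wb') as fp:
        fp.write(encoded_bytes)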
tools/misc/browse_dataset.py

@@ -71,7 +71,7 @@ def build_data_cfg(config_path, aug, cfg_options):
     if aug:
         show_pipeline = cfg.train_pipeline
     else:
-        show_pipeline = cfg.eval_pipeline
+        show_pipeline = cfg.test_pipeline
         for i in range(len(cfg.train_pipeline)):
             if cfg.train_pipeline[i]['type'] == 'LoadAnnotations3D':
                 show_pipeline.insert(i, cfg.train_pipeline[i])
@@ -117,13 +117,20 @@ def main():
     progress_bar = ProgressBar(len(dataset))

-    for item in dataset:
+    for i, item in enumerate(dataset):
         # the 3D Boxes in input could be in any of three coordinates
         data_input = item['inputs']
         data_sample = item['data_samples'].numpy()

-        out_file = osp.join(
-            args.output_dir) if args.output_dir is not None else None
+        out_file = osp.join(
+            args.output_dir,
+            f'{i}.jpg') if args.output_dir is not None else None
+        # o3d_save_path is valid when args.not_show is False
+        o3d_save_path = osp.join(args.output_dir, f'pc_{i}.png') if (
+            args.output_dir is not None
+            and vis_task in ['lidar_det', 'lidar_seg', 'multi-modality_det']
+            and not args.not_show) else None

         visualizer.add_datasample(
             '3d visualzier',
@@ -132,6 +139,7 @@ def main():
             show=not args.not_show,
             wait_time=args.show_interval,
             out_file=out_file,
+            o3d_save_path=o3d_save_path,
             vis_task=vis_task)
         progress_bar.update()