OpenDAS / mmdetection3d, commit ed115937

[Refactor] Refactor nus dataset

Authored Jul 05, 2022 by VVsssssk; committed Jul 20, 2022 by ChaimZhu.
Parent: f739803c

Showing 14 changed files with 389 additions and 662 deletions (+389 -662):
configs/_base_/datasets/nus-3d.py (+50 -40)
configs/_base_/models/hv_pointpillars_fpn_nus.py (+7 -4)
configs/_base_/schedules/schedule_2x.py (+28 -12)
configs/pointpillars/hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d.py (+6 -0)
configs/pointpillars/hv_pointpillars_fpn_sbn-all_fp16_2x8_2x_nus-3d.py (+1 -1)
configs/pointpillars/hv_pointpillars_secfpn_sbn-all_4x8_2x_nus-3d.py (+6 -0)
configs/pointpillars/hv_pointpillars_secfpn_sbn-all_fp16_2x8_2x_nus-3d.py (+1 -1)
mmdet3d/datasets/det3d_dataset.py (+6 -2)
mmdet3d/datasets/nuscenes_dataset.py (+53 -585)
mmdet3d/datasets/pipelines/loading.py (+22 -17)
mmdet3d/datasets/utils.py (+13 -0)
tests/data/nuscenes/nus_info.pkl (+0 -0)
tests/test_data/test_datasets/test_nuscenes_dataset.py (+81 -0)
tools/data_converter/update_infos_to_v2.py (+115 -0)
configs/_base_/datasets/nus-3d.py (+50 -40)

@@ -6,17 +6,14 @@ class_names = [
     'car', 'truck', 'trailer', 'bus', 'construction_vehicle', 'bicycle',
     'motorcycle', 'pedestrian', 'traffic_cone', 'barrier'
 ]
+metainfo = dict(CLASSES=class_names)
 dataset_type = 'NuScenesDataset'
 data_root = 'data/nuscenes/'
 # Input modality for nuScenes dataset, this is consistent with the submission
 # format which requires the information in input_modality.
-input_modality = dict(
-    use_lidar=True,
-    use_camera=False,
-    use_radar=False,
-    use_map=False,
-    use_external=False)
+input_modality = dict(use_lidar=True, use_camera=False)
 file_client_args = dict(backend='disk')
+data_prefix = dict(pts='samples/LIDAR_TOP', img='')
 # Uncomment the following if use ceph or other file clients.
 # See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient
 # for more details.
@@ -48,8 +45,9 @@ train_pipeline = [
     dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),
     dict(type='ObjectNameFilter', classes=class_names),
     dict(type='PointShuffle'),
-    dict(type='DefaultFormatBundle3D', class_names=class_names),
-    dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])
+    dict(
+        type='Pack3DDetInputs',
+        keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])
 ]
 test_pipeline = [
     dict(
@@ -61,6 +59,7 @@ test_pipeline = [
     dict(
         type='LoadPointsFromMultiSweeps',
         sweeps_num=10,
+        test_mode=True,
         file_client_args=file_client_args),
     dict(
         type='MultiScaleFlipAug3D',
@@ -75,13 +74,9 @@ test_pipeline = [
             translation_std=[0, 0, 0]),
         dict(type='RandomFlip3D'),
-        dict(
-            type='PointsRangeFilter', point_cloud_range=point_cloud_range),
-        dict(
-            type='DefaultFormatBundle3D',
-            class_names=class_names,
-            with_label=False),
-        dict(type='Collect3D', keys=['points'])
-    ])
+        dict(
+            type='PointsRangeFilter', point_cloud_range=point_cloud_range)
+    ]),
+    dict(type='Pack3DDetInputs', keys=['points'])
 ]
 # construct a pipeline for data and gt loading in show function
 # please keep its loading function consistent with test_pipeline (e.g. client)
@@ -95,48 +90,63 @@ eval_pipeline = [
     dict(
         type='LoadPointsFromMultiSweeps',
         sweeps_num=10,
         test_mode=True,
         file_client_args=file_client_args),
-    dict(
-        type='DefaultFormatBundle3D',
-        class_names=class_names,
-        with_label=False),
-    dict(type='Collect3D', keys=['points'])
+    dict(type='Pack3DDetInputs', keys=['points'])
 ]
-data = dict(
-    samples_per_gpu=4,
-    workers_per_gpu=4,
-    train=dict(
+train_dataloader = dict(
+    batch_size=4,
+    num_workers=4,
+    persistent_workers=True,
+    sampler=dict(type='DefaultSampler', shuffle=True),
+    dataset=dict(
         type=dataset_type,
         data_root=data_root,
-        ann_file=data_root + 'nuscenes_infos_train.pkl',
+        ann_file='nuscenes_infos_train.pkl',
         pipeline=train_pipeline,
-        classes=class_names,
+        metainfo=metainfo,
         modality=input_modality,
         test_mode=False,
+        data_prefix=data_prefix,
         # we use box_type_3d='LiDAR' in kitti and nuscenes dataset
         # and box_type_3d='Depth' in sunrgbd and scannet dataset.
-        box_type_3d='LiDAR'),
-    val=dict(
+        box_type_3d='LiDAR'))
+test_dataloader = dict(
+    batch_size=1,
+    num_workers=1,
+    persistent_workers=True,
+    drop_last=False,
+    sampler=dict(type='DefaultSampler', shuffle=False),
+    dataset=dict(
         type=dataset_type,
         data_root=data_root,
-        ann_file=data_root + 'nuscenes_infos_val.pkl',
+        ann_file='nuscenes_infos_val.pkl',
         pipeline=test_pipeline,
-        classes=class_names,
+        metainfo=metainfo,
         modality=input_modality,
+        data_prefix=data_prefix,
         test_mode=True,
-        box_type_3d='LiDAR'),
-    test=dict(
+        box_type_3d='LiDAR'))
+val_dataloader = dict(
+    batch_size=1,
+    num_workers=1,
+    persistent_workers=True,
+    drop_last=False,
+    sampler=dict(type='DefaultSampler', shuffle=False),
+    dataset=dict(
        type=dataset_type,
         data_root=data_root,
-        ann_file=data_root + 'nuscenes_infos_val.pkl',
+        ann_file='nuscenes_infos_val.pkl',
         pipeline=test_pipeline,
-        classes=class_names,
+        metainfo=metainfo,
         modality=input_modality,
         test_mode=True,
+        data_prefix=data_prefix,
         box_type_3d='LiDAR'))
-# For nuScenes dataset, we usually evaluate the model at the end of training.
-# Since the models are trained by 24 epochs by default, we set evaluation
-# interval to be 24. Please change the interval accordingly if you do not
-# use a default schedule.
-evaluation = dict(interval=24, pipeline=eval_pipeline)
+val_evaluator = dict(
+    type='NuScenesMetric',
+    data_root=data_root,
+    ann_file=data_root + 'nuscenes_infos_val.pkl',
+    metric='bbox')
+test_evaluator = val_evaluator
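
Note that the dataloader settings above replace the old single data dict, and ann_file inside each dataset is now given relative to data_root rather than pre-joined. A minimal sketch of what that convention implies for path resolution (my own illustration, not the dataset class's actual code):

# Sketch only: under the new config convention, ann_file is relative and the
# dataset is expected to join it with data_root internally.
from os import path as osp

data_root = 'data/nuscenes/'
ann_file = 'nuscenes_infos_train.pkl'  # relative, as in train_dataloader
assert osp.join(data_root, ann_file) == 'data/nuscenes/nuscenes_infos_train.pkl'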
configs/_base_/models/hv_pointpillars_fpn_nus.py (+7 -4)

@@ -6,6 +6,7 @@
 voxel_size = [0.25, 0.25, 8]
 model = dict(
     type='MVXFasterRCNN',
+    data_preprocessor=dict(type='Det3DDataPreprocessor'),
     pts_voxel_layer=dict(
         max_num_points=64,
         point_cloud_range=[-50, -50, -5, 50, 50, 3],
@@ -62,19 +63,21 @@ model = dict(
         dir_offset=-0.7854,  # -pi / 4
         bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder', code_size=9),
         loss_cls=dict(
-            type='FocalLoss',
+            type='mmdet.FocalLoss',
             use_sigmoid=True,
             gamma=2.0,
             alpha=0.25,
             loss_weight=1.0),
-        loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0),
+        loss_bbox=dict(
+            type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0),
         loss_dir=dict(
-            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.2)),
+            type='mmdet.CrossEntropyLoss', use_sigmoid=False,
+            loss_weight=0.2)),
     # model training and testing settings
     train_cfg=dict(
         pts=dict(
             assigner=dict(
-                type='MaxIoUAssigner',
+                type='Max3DIoUAssigner',
                 iou_calculator=dict(type='BboxOverlapsNearest3D'),
                 pos_iou_thr=0.6,
                 neg_iou_thr=0.3,
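
The mmdet.-prefixed loss types rely on scope-prefixed registry keys: the scope names the package whose registry holds the class. A hedged sketch of the splitting idea (an illustrative helper, not the actual mmengine implementation):

def split_scope_key(key):
    """Split a scoped type name: 'mmdet.FocalLoss' -> ('mmdet', 'FocalLoss')."""
    index = key.find('.')
    if index != -1:
        return key[:index], key[index + 1:]
    return None, key

assert split_scope_key('mmdet.FocalLoss') == ('mmdet', 'FocalLoss')
assert split_scope_key('Max3DIoUAssigner') == (None, 'Max3DIoUAssigner')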
configs/_base_/schedules/schedule_2x.py (+28 -12)

-# optimizer
 # This schedule is mainly used by models on nuScenes dataset
-optimizer = dict(type='AdamW', lr=0.001, weight_decay=0.01)
-# max_norm=10 is better for SECOND
-optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
-lr_config = dict(
-    policy='step',
-    warmup='linear',
-    warmup_iters=1000,
-    warmup_ratio=1.0 / 1000,
-    step=[20, 23])
-momentum_config = None
-# runtime settings
-runner = dict(type='EpochBasedRunner', max_epochs=24)
+lr = 0.001
+optim_wrapper = dict(
+    type='OptimWrapper',
+    optimizer=dict(type='AdamW', lr=lr, weight_decay=0.01),
+    # max_norm=10 is better for SECOND
+    clip_grad=dict(max_norm=35, norm_type=2))
+# training schedule for 2x
+train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=24, val_interval=1)
+val_cfg = dict(type='ValLoop')
+test_cfg = dict(type='TestLoop')
+# learning rate
+param_scheduler = [
+    dict(
+        type='LinearLR',
+        start_factor=1.0 / 1000,
+        by_epoch=False,
+        begin=0,
+        end=1000),
+    dict(
+        type='MultiStepLR',
+        begin=0,
+        end=24,
+        by_epoch=True,
+        milestones=[20, 23],
+        gamma=0.1)
+]
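
A standalone sketch (pure Python, not mmengine's scheduler code) of the learning-rate curve the new param_scheduler describes: linear warmup over the first 1000 iterations, then step decay by 0.1 at epochs 20 and 23, matching the old lr_config it replaces:

def lr_at(epoch, it, base_lr=0.001):
    if epoch == 0 and it < 1000:  # LinearLR warmup, by_epoch=False
        start_factor = 1.0 / 1000
        return base_lr * (start_factor + (1 - start_factor) * it / 1000)
    gamma = 0.1                   # MultiStepLR, milestones=[20, 23]
    decay = sum(epoch >= m for m in (20, 23))
    return base_lr * gamma ** decay

assert abs(lr_at(0, 0) - 1e-6) < 1e-12   # warmup start: lr / 1000
assert lr_at(10, 5000) == 0.001          # after warmup, before decay
assert abs(lr_at(21, 0) - 1e-4) < 1e-12  # after the first milestone
assert abs(lr_at(23, 0) - 1e-5) < 1e-12  # after the second milestone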
configs/pointpillars/hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d.py (+6 -0)

@@ -3,3 +3,9 @@ _base_ = [
     '../_base_/datasets/nus-3d.py',
     '../_base_/schedules/schedule_2x.py',
     '../_base_/default_runtime.py'
 ]
+
+# For nuScenes dataset, we usually evaluate the model at the end of training.
+# Since the models are trained by 24 epochs by default, we set evaluation
+# interval to be 24. Please change the interval accordingly if you do not
+# use a default schedule.
+train_cfg = dict(val_interval=24)
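
This override relies on the _base_ merge semantics of the config system: the child's train_cfg = dict(val_interval=24) is merged into the inherited train_cfg, replacing only val_interval. An illustrative pure-Python merge (a sketch of the behavior, not mmengine's implementation):

def merge(base, child):
    out = dict(base)
    for k, v in child.items():
        if isinstance(v, dict) and isinstance(base.get(k), dict):
            out[k] = merge(base[k], v)
        else:
            out[k] = v
    return out

base = dict(type='EpochBasedTrainLoop', max_epochs=24, val_interval=1)
assert merge(base, dict(val_interval=24)) == dict(
    type='EpochBasedTrainLoop', max_epochs=24, val_interval=24)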
configs/pointpillars/hv_pointpillars_fpn_sbn-all_fp16_2x8_2x_nus-3d.py (+1 -1)

 _base_ = './hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d.py'
-data = dict(samples_per_gpu=2, workers_per_gpu=2)
+train_dataloader = dict(batch_size=2, num_workers=2)
 # fp16 settings, the loss scale is specifically tuned to avoid Nan
 fp16 = dict(loss_scale=32.)
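
The fp16 comment above refers to static loss scaling. A toy illustration of the idea (my sketch, not mmcv's runner code): the loss is multiplied by loss_scale before backward so small fp16 gradients do not underflow, and the gradients are divided by the same factor before the optimizer step:

loss_scale = 32.0                # as in fp16 = dict(loss_scale=32.)
tiny_grad = 2.0 ** -25           # would lose precision in raw fp16
scaled = tiny_grad * loss_scale  # backward() effectively produces this
unscaled = scaled / loss_scale   # recovered before optimizer.step()
assert unscaled == tiny_grad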
configs/pointpillars/hv_pointpillars_secfpn_sbn-all_4x8_2x_nus-3d.py (+6 -0)

@@ -40,3 +40,9 @@ model = dict(
             custom_values=[0, 0],
             rotations=[0, 1.57],
             reshape_out=True)))
+
+# For nuScenes dataset, we usually evaluate the model at the end of training.
+# Since the models are trained by 24 epochs by default, we set evaluation
+# interval to be 24. Please change the interval accordingly if you do not
+# use a default schedule.
+train_cfg = dict(val_interval=24)
configs/pointpillars/hv_pointpillars_secfpn_sbn-all_fp16_2x8_2x_nus-3d.py (+1 -1)

 _base_ = './hv_pointpillars_secfpn_sbn-all_4x8_2x_nus-3d.py'
-data = dict(samples_per_gpu=2, workers_per_gpu=2)
+train_dataloader = dict(batch_size=2, num_workers=2)
 # fp16 settings, the loss scale is specifically tuned to avoid Nan
 fp16 = dict(loss_scale=32.)
mmdet3d/datasets/det3d_dataset.py (+6 -2)

@@ -227,8 +227,12 @@ class Det3DDataset(BaseDataset):
         if self.modality['use_camera']:
             for cam_id, img_info in info['images'].items():
                 if 'img_path' in img_info:
-                    img_info['img_path'] = osp.join(
-                        self.data_prefix.get('img', ''), img_info['img_path'])
+                    if cam_id in self.data_prefix:
+                        cam_prefix = self.data_prefix[cam_id]
+                    else:
+                        cam_prefix = self.data_prefix.get('img', '')
+                    img_info['img_path'] = osp.join(cam_prefix,
+                                                    img_info['img_path'])

         if not self.test_mode:
             # used in training
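
A standalone sketch of the new per-camera prefix lookup: if data_prefix carries an entry for the specific camera id (for example 'CAM_FRONT'), that prefix wins; otherwise the generic 'img' prefix is used. This mirrors the logic above outside the class (illustrative helper, not repository code):

from os import path as osp

def resolve_img_path(data_prefix, cam_id, img_path):
    cam_prefix = data_prefix.get(cam_id, data_prefix.get('img', ''))
    return osp.join(cam_prefix, img_path)

prefix = dict(img='samples/CAM_BACK_LEFT', CAM_FRONT='samples/CAM_FRONT')
assert resolve_img_path(prefix, 'CAM_FRONT', 'a.jpg') == 'samples/CAM_FRONT/a.jpg'
assert resolve_img_path(prefix, 'CAM_BACK', 'b.jpg') == 'samples/CAM_BACK_LEFT/b.jpg'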
mmdet3d/datasets/nuscenes_dataset.py (+53 -585)

(This diff is collapsed in the original view and its contents are not shown.)
mmdet3d/datasets/pipelines/loading.py (+22 -17)

@@ -101,7 +101,7 @@ class LoadImageFromFileMono3D(LoadImageFromFile):
 @TRANSFORMS.register_module()
-class LoadPointsFromMultiSweeps(object):
+class LoadPointsFromMultiSweeps(BaseTransform):
     """Load points from multiple sweeps.

     This is usually used for nuScenes dataset to utilize previous sweeps.
@@ -186,7 +186,7 @@ class LoadPointsFromMultiSweeps(object):
         not_close = np.logical_not(np.logical_and(x_filt, y_filt))
         return points[not_close]

-    def __call__(self, results):
+    def transform(self, results):
         """Call function to load multi-sweep point clouds from files.

         Args:
@@ -204,30 +204,35 @@ class LoadPointsFromMultiSweeps(object):
         points.tensor[:, 4] = 0
         sweep_points_list = [points]
         ts = results['timestamp']
-        if self.pad_empty_sweeps and len(results['sweeps']) == 0:
-            for i in range(self.sweeps_num):
-                if self.remove_close:
-                    sweep_points_list.append(self._remove_close(points))
-                else:
-                    sweep_points_list.append(points)
+        if 'lidar_sweeps' not in results:
+            if self.pad_empty_sweeps:
+                for i in range(self.sweeps_num):
+                    if self.remove_close:
+                        sweep_points_list.append(self._remove_close(points))
+                    else:
+                        sweep_points_list.append(points)
         else:
-            if len(results['sweeps']) <= self.sweeps_num:
-                choices = np.arange(len(results['sweeps']))
+            if len(results['lidar_sweeps']) <= self.sweeps_num:
+                choices = np.arange(len(results['lidar_sweeps']))
             elif self.test_mode:
                 choices = np.arange(self.sweeps_num)
             else:
                 choices = np.random.choice(
-                    len(results['sweeps']), self.sweeps_num, replace=False)
+                    len(results['lidar_sweeps']),
+                    self.sweeps_num,
+                    replace=False)
             for idx in choices:
-                sweep = results['sweeps'][idx]
-                points_sweep = self._load_points(sweep['data_path'])
+                sweep = results['lidar_sweeps'][idx]
+                points_sweep = self._load_points(
+                    sweep['lidar_points']['lidar_path'])
                 points_sweep = np.copy(points_sweep).reshape(-1, self.load_dim)
                 if self.remove_close:
                     points_sweep = self._remove_close(points_sweep)
-                sweep_ts = sweep['timestamp'] / 1e6
-                points_sweep[:, :3] = points_sweep[:, :3] @ sweep[
-                    'sensor2lidar_rotation'].T
-                points_sweep[:, :3] += sweep['sensor2lidar_translation']
+                # bc-breaking: Timestamp has divided 1e6 in pkl infos.
+                sweep_ts = sweep['timestamp']
+                lidar2sensor = np.array(
+                    sweep['lidar_points']['lidar2sensor'])
+                points_sweep[:, :3] = \
+                    points_sweep[:, :3] @ lidar2sensor[:3, :3]
+                points_sweep[:, :3] -= lidar2sensor[:3, 3]
                 points_sweep[:, 4] = ts - sweep_ts
                 points_sweep = points.new_point(points_sweep)
                 sweep_points_list.append(points_sweep)
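
A small numpy check (my own sketch, not repository code) that the new lidar2sensor formulation reproduces the old sensor2lidar math. The old code computed p' = p @ R.T + t with R = sensor2lidar_rotation and t = sensor2lidar_translation; the new pkl infos store lidar2sensor with the top-left block R.T and translation column -t, so the loader's p @ lidar2sensor[:3, :3] followed by subtracting lidar2sensor[:3, 3] gives the same result:

import numpy as np

rng = np.random.default_rng(0)
R = np.linalg.qr(rng.normal(size=(3, 3)))[0]  # an orthogonal matrix
t = rng.normal(size=3)
p = rng.normal(size=(5, 3))

old = p @ R.T + t  # old sensor->lidar transform

l2s = np.eye(4)
l2s[:3, :3] = R.T
l2s[:3, 3] = -t
new = p @ l2s[:3, :3] - l2s[:3, 3]  # new formulation

assert np.allclose(old, new)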
mmdet3d/datasets/utils.py (+13 -0)

 # Copyright (c) OpenMMLab. All rights reserved.
 import mmcv
+import numpy as np
 from mmcv.transforms import LoadImageFromFile
+from pyquaternion import Quaternion

 # yapf: disable
 from mmdet3d.datasets.pipelines import (LoadAnnotations3D,

@@ -137,3 +139,14 @@ def extract_result_dict(results, key):
     if isinstance(data, mmcv.parallel.DataContainer):
         data = data._data

     return data
+
+
+def convert_quaternion_to_matrix(quaternion: list,
+                                 translation: list = None) -> list:
+    """Compute a transform matrix by given quaternion and translation
+    vector."""
+    result = np.eye(4)
+    result[:3, :3] = Quaternion(quaternion).rotation_matrix
+    if translation is not None:
+        result[:3, 3] = np.array(translation)
+    return result.astype(np.float32).tolist()
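
A hedged usage sketch for the new helper: an identity quaternion (w, x, y, z order, as pyquaternion expects) plus a translation should yield a 4x4 matrix that is identity except for the last column:

from mmdet3d.datasets.utils import convert_quaternion_to_matrix

mat = convert_quaternion_to_matrix([1, 0, 0, 0], [1.0, 2.0, 3.0])
# mat is a nested list; the rotation block is identity and the
# translation sits in the last column.
assert mat[0][:3] == [1.0, 0.0, 0.0]
assert [row[3] for row in mat] == [1.0, 2.0, 3.0, 1.0]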
tests/data/nuscenes/nus_info.pkl (+0 -0)

(Binary file; no preview available.)
tests/test_data/test_datasets/test_nuscenes_dataset.py (new file, +81 -0)

# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
from mmcv.transforms.base import BaseTransform
from mmengine.data import InstanceData
from mmengine.registry import TRANSFORMS

from mmdet3d.core.bbox import LiDARInstance3DBoxes
from mmdet3d.core.data_structures import Det3DDataSample
from mmdet3d.datasets import NuScenesDataset


def _generate_nus_dataset_config():
    data_root = 'tests/data/nuscenes'
    ann_file = 'nus_info.pkl'
    classes = [
        'car', 'truck', 'trailer', 'bus', 'construction_vehicle', 'bicycle',
        'motorcycle', 'pedestrian', 'traffic_cone', 'barrier'
    ]
    if 'Identity' not in TRANSFORMS:

        @TRANSFORMS.register_module()
        class Identity(BaseTransform):

            def transform(self, info):
                packed_input = dict(data_sample=Det3DDataSample())
                if 'ann_info' in info:
                    packed_input['data_sample'].gt_instances_3d = \
                        InstanceData()
                    packed_input['data_sample'].gt_instances_3d.labels_3d = \
                        info['ann_info']['gt_labels_3d']
                return packed_input

    pipeline = [
        dict(type='Identity'),
    ]
    modality = dict(use_lidar=True, use_camera=True)
    data_prefix = dict(pts='samples/LIDAR_TOP', img='samples/CAM_BACK_LEFT')
    return data_root, ann_file, classes, data_prefix, pipeline, modality


def test_getitem():
    np.random.seed(0)
    data_root, ann_file, classes, data_prefix, pipeline, modality = \
        _generate_nus_dataset_config()

    nus_dataset = NuScenesDataset(
        data_root,
        ann_file,
        data_prefix=data_prefix,
        pipeline=pipeline,
        metainfo=dict(CLASSES=classes),
        modality=modality)

    nus_dataset.prepare_data(0)
    input_dict = nus_dataset.get_data_info(0)
    # assert the path contains data_prefix and data_root
    assert data_prefix['pts'] in input_dict['lidar_points']['lidar_path']
    assert input_dict['lidar_points']['lidar_path'] == \
        'tests/data/nuscenes/samples/LIDAR_TOP/' \
        'n015-2018-08-02-17-16-37+0800__LIDAR_TOP__' \
        '1533201470948018.pcd.bin'
    for cam_id, img_info in input_dict['images'].items():
        if 'img_path' in img_info:
            assert data_prefix['img'] in img_info['img_path']
            assert data_root in img_info['img_path']

    ann_info = nus_dataset.parse_ann_info(input_dict)

    # assert the keys in ann_info and their types
    assert 'gt_labels_3d' in ann_info
    assert ann_info['gt_labels_3d'].dtype == np.int64
    assert len(ann_info['gt_labels_3d']) == 37

    assert 'gt_bboxes_3d' in ann_info
    assert isinstance(ann_info['gt_bboxes_3d'], LiDARInstance3DBoxes)

    assert len(nus_dataset.metainfo['CLASSES']) == 10
    assert input_dict['token'] == 'fd8420396768425eabec9bdddf7e64b6'
    assert input_dict['timestamp'] == 1533201470.448696
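
The final assertion reflects the bc-breaking timestamp convention introduced by this commit: raw nuScenes timestamps are microseconds, while the v2 infos store seconds. A one-line check of the arithmetic:

raw_us = 1533201470448696                 # microsecond timestamp from nuScenes
assert raw_us / 1e6 == 1533201470.448696  # seconds, as asserted in the test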
tools/data_converter/update_infos_to_v2.py (+115 -0)

@@ -15,6 +15,8 @@ from os import path as osp
 import mmcv
 import numpy as np

+from mmdet3d.datasets.utils import convert_quaternion_to_matrix
+

 def get_empty_instance():
     """Empty annotation for single instance."""
@@ -156,6 +158,7 @@ def get_empty_standard_data_info():
         radar_points=get_empty_radar_points(),
         # (list[dict], optional): Image sweeps data.
         image_sweeps=[],
+        lidar_sweeps=[],
         instances=[],
         # (list[dict], optional): Required by object
         # detection, instance to be ignored during training.
@@ -203,6 +206,116 @@ def clear_data_info_unused_keys(data_info):
     return data_info, empty_flag


+def update_nuscenes_infos(pkl_path, out_dir):
+    print(f'{pkl_path} will be modified.')
+    if out_dir in pkl_path:
+        print(f'Warning, you may be overwriting '
+              f'the original data {pkl_path}.')
+    print(f'Reading from input file: {pkl_path}.')
+    data_list = mmcv.load(pkl_path)
+    METAINFO = {
+        'CLASSES': ('car', 'truck', 'trailer', 'bus', 'construction_vehicle',
+                    'bicycle', 'motorcycle', 'pedestrian', 'traffic_cone',
+                    'barrier'),
+        'DATASET': 'Nuscenes',
+        'version': data_list['metadata']['version']
+    }
+    print('Start updating:')
+    converted_list = []
+    for i, ori_info_dict in enumerate(
+            mmcv.track_iter_progress(data_list['infos'])):
+        temp_data_info = get_empty_standard_data_info()
+        temp_data_info['sample_idx'] = i
+        temp_data_info['token'] = ori_info_dict['token']
+        temp_data_info['ego2global'] = convert_quaternion_to_matrix(
+            ori_info_dict['ego2global_rotation'],
+            ori_info_dict['ego2global_translation'])
+        temp_data_info['lidar_points']['lidar_path'] = \
+            ori_info_dict['lidar_path'].split('/')[-1]
+        temp_data_info['lidar_points'][
+            'lidar2ego'] = convert_quaternion_to_matrix(
+                ori_info_dict['lidar2ego_rotation'],
+                ori_info_dict['lidar2ego_translation'])
+        # bc-breaking: Timestamp has divided 1e6 in pkl infos.
+        temp_data_info['timestamp'] = ori_info_dict['timestamp'] / 1e6
+        for ori_sweep in ori_info_dict['sweeps']:
+            temp_lidar_sweep = get_single_lidar_sweep()
+            temp_lidar_sweep['lidar_points'][
+                'lidar2ego'] = convert_quaternion_to_matrix(
+                    ori_sweep['sensor2ego_rotation'],
+                    ori_sweep['sensor2ego_translation'])
+            temp_lidar_sweep['ego2global'] = convert_quaternion_to_matrix(
+                ori_sweep['ego2global_rotation'],
+                ori_sweep['ego2global_translation'])
+            lidar2sensor = np.eye(4)
+            lidar2sensor[:3, :3] = ori_sweep['sensor2lidar_rotation'].T
+            lidar2sensor[:3, 3] = -ori_sweep['sensor2lidar_translation']
+            temp_lidar_sweep['lidar_points'][
+                'lidar2sensor'] = lidar2sensor.astype(np.float32).tolist()
+            temp_lidar_sweep['timestamp'] = ori_sweep['timestamp'] / 1e6
+            temp_lidar_sweep['lidar_points']['lidar_path'] = \
+                ori_sweep['data_path']
+            temp_lidar_sweep['sample_data_token'] = \
+                ori_sweep['sample_data_token']
+            temp_data_info['lidar_sweeps'].append(temp_lidar_sweep)
+        temp_data_info['images'] = {}
+        for cam in ori_info_dict['cams']:
+            empty_img_info = get_empty_img_info()
+            empty_img_info['img_path'] = \
+                ori_info_dict['cams'][cam]['data_path'].split('/')[-1]
+            empty_img_info['cam2img'] = \
+                ori_info_dict['cams'][cam]['cam_intrinsic'].tolist()
+            empty_img_info['sample_data_token'] = \
+                ori_info_dict['cams'][cam]['sample_data_token']
+            # bc-breaking: Timestamp has divided 1e6 in pkl infos.
+            empty_img_info['timestamp'] = \
+                ori_info_dict['cams'][cam]['timestamp'] / 1e6
+            empty_img_info['cam2ego'] = convert_quaternion_to_matrix(
+                ori_info_dict['cams'][cam]['sensor2ego_rotation'],
+                ori_info_dict['cams'][cam]['sensor2ego_translation'])
+            lidar2sensor = np.eye(4)
+            lidar2sensor[:3, :3] = \
+                ori_info_dict['cams'][cam]['sensor2lidar_rotation'].T
+            lidar2sensor[:3, 3] = \
+                -ori_info_dict['cams'][cam]['sensor2lidar_translation']
+            empty_img_info['lidar2cam'] = \
+                lidar2sensor.astype(np.float32).tolist()
+            temp_data_info['images'][cam] = empty_img_info
+        num_instances = ori_info_dict['gt_boxes'].shape[0]
+        ignore_class_name = set()
+        for i in range(num_instances):
+            empty_instance = get_empty_instance()
+            empty_instance['bbox_3d'] = \
+                ori_info_dict['gt_boxes'][i, :].tolist()
+            if ori_info_dict['gt_names'][i] in METAINFO['CLASSES']:
+                empty_instance['bbox_label'] = METAINFO['CLASSES'].index(
+                    ori_info_dict['gt_names'][i])
+            else:
+                ignore_class_name.add(ori_info_dict['gt_names'][i])
+                empty_instance['bbox_label'] = -1
+            empty_instance['bbox_label_3d'] = copy.deepcopy(
+                empty_instance['bbox_label'])
+            empty_instance['velocity'] = \
+                ori_info_dict['gt_velocity'][i, :].tolist()
+            empty_instance['num_lidar_pts'] = \
+                ori_info_dict['num_lidar_pts'][i]
+            empty_instance['num_radar_pts'] = \
+                ori_info_dict['num_radar_pts'][i]
+            empty_instance['bbox_3d_isvalid'] = \
+                ori_info_dict['valid_flag'][i]
+            empty_instance = clear_instance_unused_keys(empty_instance)
+            temp_data_info['instances'].append(empty_instance)
+        temp_data_info, _ = clear_data_info_unused_keys(temp_data_info)
+        converted_list.append(temp_data_info)
+    pkl_name = pkl_path.split('/')[-1]
+    out_path = osp.join(out_dir, pkl_name)
+    print(f'Writing to output file: {out_path}.')
+    print(f'ignore classes: {ignore_class_name}')
+    converted_data_info = dict(metainfo=METAINFO, data_list=converted_list)
+    mmcv.dump(converted_data_info, out_path, 'pkl')
+    return temp_lidar_sweep
+
+
 def update_kitti_infos(pkl_path, out_dir):
     print(f'{pkl_path} will be modified.')
     if out_dir in pkl_path:
@@ -479,6 +592,8 @@ def main():
         update_scannet_infos(pkl_path=args.pkl, out_dir=args.out_dir)
     elif args.dataset.lower() == 'sunrgbd':
         update_sunrgbd_infos(pkl_path=args.pkl, out_dir=args.out_dir)
+    elif args.dataset.lower() == 'nuscenes':
+        update_nuscenes_infos(pkl_path=args.pkl, out_dir=args.out_dir)
     else:
         raise NotImplementedError(f'Do not support convert {args.dataset} '
                                   f'to v2.')
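
For reference, one entry of the converted v2 pkl, assembled from the fields update_nuscenes_infos() fills in above. This is a hand-written illustration with Ellipsis placeholders for values not determined by the diff, not actual converter output; the token, timestamp, and lidar filename are taken from the new test:

sample = dict(
    sample_idx=0,
    token='fd8420396768425eabec9bdddf7e64b6',
    timestamp=1533201470.448696,  # seconds (bc-breaking: divided by 1e6)
    ego2global=[...],             # 4x4 matrix as nested lists
    lidar_points=dict(
        lidar_path='n015-2018-08-02-17-16-37+0800__LIDAR_TOP__'
                   '1533201470948018.pcd.bin',
        lidar2ego=[...]),
    lidar_sweeps=[
        dict(
            timestamp=...,        # seconds as well
            sample_data_token=...,
            ego2global=[...],
            lidar_points=dict(
                lidar_path=..., lidar2ego=[...], lidar2sensor=[...])),
    ],
    images=dict(
        CAM_FRONT=dict(
            img_path=..., cam2img=[...], cam2ego=[...], lidar2cam=[...],
            timestamp=..., sample_data_token=...)),
    instances=[
        dict(
            bbox_3d=[...], bbox_label=0, bbox_label_3d=0,
            velocity=[...], num_lidar_pts=..., num_radar_pts=...,
            bbox_3d_isvalid=True),
    ])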