Commit 7bb011af authored by VVsssssk, committed by ChaimZhu

Refactor the KITTI dataset base configs

Replace the old data = dict(train=..., val=..., test=...) block with new-style
train_dataloader / val_dataloader / test_dataloader, swap DefaultFormatBundle3D +
Collect3D for Pack3DDetInputs, move the class names into metainfo, make ann_file
relative to data_root with an explicit data_prefix, and replace the evaluation hook
with val_evaluator / test_evaluator based on KittiMetric.

parent de58f9ee
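For orientation, the sketch below shows how a downstream detector config would consume one of the refactored base files whose diff follows. The file name, relative path, and override values are illustrative assumptions, not part of this commit; the refactor itself only introduces the new-style fields (train_dataloader, val_dataloader, test_dataloader, val_evaluator) that such a config would override.

    # Hypothetical downstream config (path and values are assumptions, not from this diff).
    # It inherits the refactored KITTI base config and overrides a few of the new
    # dataloader fields; the config system merges these dicts into the inherited ones.
    _base_ = ['../_base_/datasets/kitti-3d-3class.py']

    train_dataloader = dict(batch_size=4, num_workers=4)
    val_dataloader = dict(batch_size=1)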
@@ -4,6 +4,7 @@ data_root = 'data/kitti/'
 class_names = ['Pedestrian', 'Cyclist', 'Car']
 point_cloud_range = [0, -40, -3, 70.4, 40, 1]
 input_modality = dict(use_lidar=True, use_camera=False)
+metainfo = dict(CLASSES=class_names)
 db_sampler = dict(
     data_root=data_root,
     info_path=data_root + 'kitti_dbinfos_train.pkl',
@@ -13,14 +14,12 @@ db_sampler = dict(
         filter_by_min_points=dict(Car=5, Pedestrian=10, Cyclist=10)),
     classes=class_names,
     sample_groups=dict(Car=12, Pedestrian=6, Cyclist=6))
-
 file_client_args = dict(backend='disk')
 # Uncomment the following if use ceph or other file clients.
 # See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient
 # for more details.
 # file_client_args = dict(
 #     backend='petrel', path_mapping=dict(data='s3://kitti_data/'))
-
 train_pipeline = [
     dict(
         type='LoadPointsFromFile',
@@ -48,8 +47,9 @@ train_pipeline = [
     dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),
     dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),
     dict(type='PointShuffle'),
-    dict(type='DefaultFormatBundle3D', class_names=class_names),
-    dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])
+    dict(
+        type='Pack3DDetInputs',
+        keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])
 ]
 test_pipeline = [
     dict(
@@ -72,11 +72,7 @@ test_pipeline = [
             dict(type='RandomFlip3D'),
             dict(
                 type='PointsRangeFilter', point_cloud_range=point_cloud_range),
-            dict(
-                type='DefaultFormatBundle3D',
-                class_names=class_names,
-                with_label=False),
-            dict(type='Collect3D', keys=['points'])
+            dict(type='Pack3DDetInputs', keys=['points']),
         ])
 ]
 # construct a pipeline for data and gt loading in show function
@@ -88,53 +84,48 @@ eval_pipeline = [
         load_dim=4,
         use_dim=4,
         file_client_args=file_client_args),
-    dict(
-        type='DefaultFormatBundle3D',
-        class_names=class_names,
-        with_label=False),
-    dict(type='Collect3D', keys=['points'])
+    dict(type='Pack3DDetInputs', keys=['points']),
 ]
-
-data = dict(
-    samples_per_gpu=6,
-    workers_per_gpu=4,
-    train=dict(
+train_dataloader = dict(
+    batch_size=2,
+    num_workers=2,
+    persistent_workers=True,
+    sampler=dict(type='DefaultSampler', shuffle=True),
+    dataset=dict(
         type='RepeatDataset',
         times=2,
         dataset=dict(
            type=dataset_type,
            data_root=data_root,
-            ann_file=data_root + 'kitti_infos_train.pkl',
-            split='training',
-            pts_prefix='velodyne_reduced',
+            ann_file='kitti_infos_train.pkl',
+            data_prefix=dict(pts='training/velodyne_reduced'),
             pipeline=train_pipeline,
             modality=input_modality,
-            classes=class_names,
             test_mode=False,
+            metainfo=metainfo,
             # we use box_type_3d='LiDAR' in kitti and nuscenes dataset
             # and box_type_3d='Depth' in sunrgbd and scannet dataset.
-            box_type_3d='LiDAR')),
-    val=dict(
-        type=dataset_type,
-        data_root=data_root,
-        ann_file=data_root + 'kitti_infos_val.pkl',
-        split='training',
-        pts_prefix='velodyne_reduced',
-        pipeline=test_pipeline,
-        modality=input_modality,
-        classes=class_names,
-        test_mode=True,
-        box_type_3d='LiDAR'),
-    test=dict(
+            box_type_3d='LiDAR')))
+val_dataloader = dict(
+    batch_size=1,
+    num_workers=1,
+    persistent_workers=True,
+    drop_last=False,
+    sampler=dict(type='DefaultSampler', shuffle=False),
+    dataset=dict(
         type=dataset_type,
         data_root=data_root,
-        ann_file=data_root + 'kitti_infos_val.pkl',
-        split='training',
-        pts_prefix='velodyne_reduced',
-        pipeline=test_pipeline,
+        data_prefix=dict(pts='training/velodyne_reduced'),
+        ann_file='kitti_infos_val.pkl',
+        pipeline=eval_pipeline,
         modality=input_modality,
-        classes=class_names,
         test_mode=True,
+        metainfo=metainfo,
         box_type_3d='LiDAR'))
-
-evaluation = dict(interval=1, pipeline=eval_pipeline)
+test_dataloader = val_dataloader
+test_dataloader = dict(dataset=dict(pipeline=test_pipeline))
+val_evaluator = dict(
+    type='KittiMetric',
+    ann_file=data_root + 'kitti_infos_val.pkl',
+    metric='bbox')
+test_evaluator = val_evaluator
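Note that the evaluation interval that used to be set here via evaluation = dict(interval=1, pipeline=eval_pipeline) no longer lives in this file; in the new-style configs it is typically carried by the training loop in a schedule config. A hedged sketch of that counterpart, assuming mmengine-style loop settings that are not part of this diff:

    # Hypothetical schedule-side counterpart (assumption, not in this commit):
    # the validation interval moves from evaluation=dict(interval=1) to the train loop.
    train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=80, val_interval=1)
    val_cfg = dict(type='ValLoop')
    test_cfg = dict(type='TestLoop')

The same refactor is applied below to the single-class (Car-only) KITTI base config.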
@@ -4,6 +4,7 @@ data_root = 'data/kitti/'
 class_names = ['Car']
 point_cloud_range = [0, -40, -3, 70.4, 40, 1]
 input_modality = dict(use_lidar=True, use_camera=False)
+metainfo = dict(CLASSES=class_names)
 db_sampler = dict(
     data_root=data_root,
     info_path=data_root + 'kitti_dbinfos_train.pkl',
@@ -11,14 +12,12 @@ db_sampler = dict(
     prepare=dict(filter_by_difficulty=[-1], filter_by_min_points=dict(Car=5)),
     classes=class_names,
     sample_groups=dict(Car=15))
-
 file_client_args = dict(backend='disk')
 # Uncomment the following if use ceph or other file clients.
 # See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient
 # for more details.
 # file_client_args = dict(
 #     backend='petrel', path_mapping=dict(data='s3://kitti_data/'))
-
 train_pipeline = [
     dict(
         type='LoadPointsFromFile',
@@ -46,8 +45,9 @@ train_pipeline = [
     dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),
     dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),
     dict(type='PointShuffle'),
-    dict(type='DefaultFormatBundle3D', class_names=class_names),
-    dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])
+    dict(
+        type='Pack3DDetInputs',
+        keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])
 ]
 test_pipeline = [
     dict(
@@ -70,11 +70,7 @@ test_pipeline = [
            dict(type='RandomFlip3D'),
            dict(
                type='PointsRangeFilter', point_cloud_range=point_cloud_range),
-            dict(
-                type='DefaultFormatBundle3D',
-                class_names=class_names,
-                with_label=False),
-            dict(type='Collect3D', keys=['points'])
+            dict(type='Pack3DDetInputs', keys=['points']),
         ])
 ]
 # construct a pipeline for data and gt loading in show function
@@ -86,53 +82,50 @@ eval_pipeline = [
         load_dim=4,
         use_dim=4,
         file_client_args=file_client_args),
-    dict(
-        type='DefaultFormatBundle3D',
-        class_names=class_names,
-        with_label=False),
-    dict(type='Collect3D', keys=['points'])
+    dict(type='Pack3DDetInputs', keys=['points']),
 ]
-
-data = dict(
-    samples_per_gpu=6,
-    workers_per_gpu=4,
-    train=dict(
+train_dataloader = dict(
+    batch_size=2,
+    num_workers=2,
+    persistent_workers=True,
+    sampler=dict(type='DefaultSampler', shuffle=True),
+    dataset=dict(
        type='RepeatDataset',
        times=2,
        dataset=dict(
            type=dataset_type,
            data_root=data_root,
-            ann_file=data_root + 'kitti_infos_train.pkl',
-            split='training',
-            pts_prefix='velodyne_reduced',
+            ann_file='kitti_infos_train.pkl',
+            data_prefix=dict(pts='training/velodyne_reduced'),
            pipeline=train_pipeline,
            modality=input_modality,
            classes=class_names,
            test_mode=False,
+            metainfo=metainfo,
            # we use box_type_3d='LiDAR' in kitti and nuscenes dataset
            # and box_type_3d='Depth' in sunrgbd and scannet dataset.
-            box_type_3d='LiDAR')),
-    val=dict(
-        type=dataset_type,
-        data_root=data_root,
-        ann_file=data_root + 'kitti_infos_val.pkl',
-        split='training',
-        pts_prefix='velodyne_reduced',
-        pipeline=test_pipeline,
-        modality=input_modality,
-        classes=class_names,
-        test_mode=True,
-        box_type_3d='LiDAR'),
-    test=dict(
+            box_type_3d='LiDAR')))
+val_dataloader = dict(
+    batch_size=1,
+    num_workers=1,
+    persistent_workers=True,
+    drop_last=False,
+    sampler=dict(type='DefaultSampler', shuffle=False),
+    dataset=dict(
        type=dataset_type,
        data_root=data_root,
-        ann_file=data_root + 'kitti_infos_val.pkl',
-        split='training',
-        pts_prefix='velodyne_reduced',
-        pipeline=test_pipeline,
+        data_prefix=dict(pts='training/velodyne_reduced'),
+        ann_file='kitti_infos_val.pkl',
+        pipeline=eval_pipeline,
        modality=input_modality,
        classes=class_names,
        test_mode=True,
+        metainfo=metainfo,
        box_type_3d='LiDAR'))
-
-evaluation = dict(interval=1, pipeline=eval_pipeline)
+test_dataloader = val_dataloader
+test_dataloader = dict(dataset=dict(pipeline=test_pipeline))
+val_evaluator = dict(
+    type='KittiMetric',
+    ann_file=data_root + 'kitti_infos_val.pkl',
+    metric='bbox')
+test_evaluator = val_evaluator
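A quick way to sanity-check the refactored files is to load them through the config system and inspect the resolved fields. A minimal sketch, assuming mmengine is the config backend on this branch and the usual _base_/datasets file locations (neither is stated on this page):

    # Sketch: load one of the refactored base configs and print two of the
    # new-style fields; the file path is an assumption.
    from mmengine.config import Config

    cfg = Config.fromfile('configs/_base_/datasets/kitti-3d-3class.py')
    print(cfg.train_dataloader.dataset.dataset.ann_file)  # 'kitti_infos_train.pkl'
    print(cfg.val_evaluator.type)  # 'KittiMetric'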