# dataset settings
dataset_type = 'KittiDataset'
data_root = 'data/kitti/'
class_names = ['Car']
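# Points and GT boxes outside this [x_min, y_min, z_min, x_max, y_max, z_max]
# range (LiDAR coordinates, in meters) are filtered out during training.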
point_cloud_range = [0, -40, -3, 70.4, 40, 1]
input_modality = dict(use_lidar=True, use_camera=False)
metainfo = dict(CLASSES=class_names)
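# GT-sampling augmentation: paste ground-truth objects from a pre-built
# database (kitti_dbinfos_train.pkl) into each training scene. Objects with
# unknown difficulty (-1) or fewer than 5 points are filtered out, and up to
# 15 'Car' instances are sampled per scene.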
db_sampler = dict(
    data_root=data_root,
    info_path=data_root + 'kitti_dbinfos_train.pkl',
    rate=1.0,
    prepare=dict(filter_by_difficulty=[-1], filter_by_min_points=dict(Car=5)),
    classes=class_names,
    sample_groups=dict(Car=15))
file_client_args = dict(backend='disk')
# Uncomment the following if you use Ceph or other file clients.
# See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient
# for more details.
# file_client_args = dict(
#     backend='petrel', path_mapping=dict(data='s3://kitti_data/'))
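# Training pipeline: load points and 3D annotations, apply GT-sampling,
# per-object noise, random flipping, global rotation/scaling, range
# filtering and point shuffling, then pack the inputs for the detector.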
train_pipeline = [
    dict(
        type='LoadPointsFromFile',
        coord_type='LIDAR',
        load_dim=4,
        use_dim=4,
        file_client_args=file_client_args),
    dict(
        type='LoadAnnotations3D',
        with_bbox_3d=True,
        with_label_3d=True,
        file_client_args=file_client_args),
    dict(type='ObjectSample', db_sampler=db_sampler),
    dict(
        type='ObjectNoise',
        num_try=100,
        translation_std=[1.0, 1.0, 0.5],
        global_rot_range=[0.0, 0.0],
        rot_range=[-0.78539816, 0.78539816]),
    dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5),
    dict(
        type='GlobalRotScaleTrans',
        rot_range=[-0.78539816, 0.78539816],
        scale_ratio_range=[0.95, 1.05]),
    dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),
    dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),
    dict(type='PointShuffle'),
    dict(
        type='Pack3DDetInputs',
        keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])
]
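# Test pipeline: no random augmentation; MultiScaleFlipAug3D is kept only as
# a wrapper (single scale, flip=False).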
test_pipeline = [
    dict(
        type='LoadPointsFromFile',
        coord_type='LIDAR',
        load_dim=4,
        use_dim=4,
        file_client_args=file_client_args),
    dict(
        type='MultiScaleFlipAug3D',
        img_scale=(1333, 800),
        pts_scale_ratio=1,
        flip=False,
        transforms=[
            dict(
                type='GlobalRotScaleTrans',
                rot_range=[0, 0],
                scale_ratio_range=[1., 1.],
                translation_std=[0, 0, 0]),
            dict(type='RandomFlip3D'),
            dict(
                type='PointsRangeFilter', point_cloud_range=point_cloud_range),
            dict(type='Pack3DDetInputs', keys=['points']),
        ])
]
# Construct a pipeline for data and GT loading in the show function.
# Please keep its loading settings consistent with test_pipeline (e.g. the file client).
eval_pipeline = [
    dict(
        type='LoadPointsFromFile',
        coord_type='LIDAR',
        load_dim=4,
        use_dim=4,
        file_client_args=file_client_args),
    dict(type='Pack3DDetInputs', keys=['points']),
]
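# Dataloaders. RepeatDataset with times=2 iterates over the training set
# twice per epoch.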
train_dataloader = dict(
    batch_size=6,
    num_workers=4,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type='RepeatDataset',
        times=2,
        dataset=dict(
            type=dataset_type,
            data_root=data_root,
            ann_file='kitti_infos_train.pkl',
            data_prefix=dict(pts='training/velodyne_reduced'),
            pipeline=train_pipeline,
            modality=input_modality,
            test_mode=False,
            metainfo=metainfo,
            # we use box_type_3d='LiDAR' in the KITTI and nuScenes datasets
            # and box_type_3d='Depth' in the SUN RGB-D and ScanNet datasets.
            box_type_3d='LiDAR')))
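# Validation loads points only (eval_pipeline) and runs in test mode.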
val_dataloader = dict(
    batch_size=1,
    num_workers=1,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        data_prefix=dict(pts='training/velodyne_reduced'),
        ann_file='kitti_infos_val.pkl',
        pipeline=eval_pipeline,
        modality=input_modality,
        test_mode=True,
        metainfo=metainfo,
        box_type_3d='LiDAR'))
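# The test dataloader reuses the validation settings but swaps in test_pipeline.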
test_dataloader = val_dataloader
test_dataloader['dataset']['pipeline'] = test_pipeline
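# KITTI-style evaluation on the validation split annotations.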
val_evaluator = dict(
    type='KittiMetric',
    ann_file=data_root + 'kitti_infos_val.pkl',
    metric='bbox')
test_evaluator = val_evaluator
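# Example (illustrative only, not part of this config): a full config can
# inherit these dataset settings via `_base_`. The model and schedule file
# names below are placeholders and depend on your mmdetection3d version.
# _base_ = [
#     '../_base_/datasets/kitti-3d-car.py',
#     '../_base_/models/some_model.py',        # hypothetical
#     '../_base_/schedules/some_schedule.py',  # hypothetical
#     '../_base_/default_runtime.py'
# ]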