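# Dataset settings for monocular 3D detection on KITTI (camera images only).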
dataset_type = 'KittiDataset'
data_root = 'data/kitti/'
class_names = ['Pedestrian', 'Cyclist', 'Car']
input_modality = dict(use_lidar=False, use_camera=True)
metainfo = dict(classes=class_names)

file_client_args = dict(backend='disk')
# Uncomment the following if using Ceph or other file clients.
# See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient
# for more details.
# file_client_args = dict(
#     backend='petrel', path_mapping=dict(data='s3://kitti_data/'))

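# Training pipeline: load the image with its 2D boxes, 3D boxes, projected
# 2D centers and depths, resize to the KITTI resolution, apply random
# horizontal flipping, and pack everything for the detector.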
train_pipeline = [
    dict(type='LoadImageFromFileMono3D'),
    dict(
        type='LoadAnnotations3D',
        with_bbox=True,
        with_label=True,
        with_attr_label=False,
        with_bbox_3d=True,
        with_label_3d=True,
        with_bbox_depth=True),
    dict(type='Resize', scale=(1242, 375), keep_ratio=True),
    dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5),
    dict(
        type='Pack3DDetInputs',
        keys=[
            'img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_bboxes_3d',
            'gt_labels_3d', 'centers_2d', 'depths'
        ]),
]
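# Inference pipeline: image only, resized to the same scale; no annotations
# are loaded.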
test_pipeline = [
    dict(type='LoadImageFromFileMono3D'),
    dict(type='Resize', scale=(1242, 375), keep_ratio=True),
    dict(type='Pack3DDetInputs', keys=['img'])
]
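# Minimal pipeline that only loads and packs the image (no resizing and no
# annotations).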
eval_pipeline = [
    dict(type='LoadImageFromFileMono3D'),
    dict(type='Pack3DDetInputs', keys=['img'])
]

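# Dataloaders for the KITTI training and validation splits. With
# load_type='fov_image_based', annotations are loaded per image for objects
# inside the front-camera field of view, as expected by monocular detectors.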
train_dataloader = dict(
    batch_size=2,
    num_workers=2,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='kitti_infos_train.pkl',
        data_prefix=dict(img='training/image_2'),
        pipeline=train_pipeline,
        modality=input_modality,
        load_type='fov_image_based',
        test_mode=False,
        metainfo=metainfo,
        # we use box_type_3d='Camera' in the monocular 3D detection task
        box_type_3d='Camera'))
val_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        data_prefix=dict(img='training/image_2'),
        ann_file='kitti_infos_val.pkl',
        pipeline=test_pipeline,
        modality=input_modality,
        load_type='fov_image_based',
        metainfo=metainfo,
        test_mode=True,
        box_type_3d='Camera'))
test_dataloader = val_dataloader

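# KITTI-style bbox evaluation on the validation split; predicted 3D boxes
# are expressed in camera coordinates.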
val_evaluator = dict(
    type='KittiMetric',
    ann_file=data_root + 'kitti_infos_val.pkl',
    metric='bbox',
    pred_box_type_3d='Camera')

test_evaluator = val_evaluator

vis_backends = [dict(type='LocalVisBackend')]
visualizer = dict(
    type='Det3DLocalVisualizer', vis_backends=vis_backends, name='visualizer')
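
# Usage sketch (illustrative paths, assuming the usual mmdetection3d _base_
# layout): a monocular 3D detector config would typically inherit this file
# via `_base_`, e.g.
#
# _base_ = [
#     '../_base_/datasets/kitti-mono3d.py',
#     '../_base_/models/pgd.py',
#     '../_base_/schedules/mmdet-schedule-1x.py',
#     '../_base_/default_runtime.py',
# ]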