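# Base dataset settings for monocular 3D detection on KITTI.
# Input is camera-only (no LiDAR); 3D boxes are handled in the camera frame.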
dataset_type = 'KittiDataset'
data_root = 'data/kitti/'
class_names = ['Pedestrian', 'Cyclist', 'Car']
input_modality = dict(use_lidar=False, use_camera=True)
metainfo = dict(CLASSES=class_names)

file_client_args = dict(backend='disk')
# Uncomment the following if using Ceph or other file clients.
# See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient
# for more details.
# file_client_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         './data/kitti/':
#         's3://openmmlab/datasets/detection3d/kitti/',
#         'data/kitti/':
#         's3://openmmlab/datasets/detection3d/kitti/'
#     }))

train_pipeline = [
    dict(type='LoadImageFromFileMono3D', file_client_args=file_client_args),
    dict(
        type='LoadAnnotations3D',
        with_bbox=True,
        with_label=True,
        with_attr_label=False,
        with_bbox_3d=True,
        with_label_3d=True,
        with_bbox_depth=True),
    dict(type='Resize', scale=(1242, 375), keep_ratio=True),
    dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5),
    dict(
        type='Pack3DDetInputs',
        keys=[
            'img', 'gt_bboxes', 'gt_labels', 'gt_bboxes_3d', 'gt_labels_3d',
            'centers_2d', 'depths'
        ]),
]
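# Test-time pipeline: image loading and resizing only; no random flip and no
# annotation loading, since only the image is packed at inference time.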
test_pipeline = [
    dict(type='LoadImageFromFileMono3D', file_client_args=file_client_args),
    dict(type='Resize', scale=(1242, 375), keep_ratio=True),
    dict(type='Pack3DDetInputs', keys=['img'])
]

train_dataloader = dict(
    batch_size=2,
    num_workers=2,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='kitti_infos_train.pkl',
        data_prefix=dict(img='training/image_2'),
        pipeline=train_pipeline,
        modality=input_modality,
        test_mode=False,
        metainfo=metainfo,
        # we use box_type_3d='Camera' in the monocular
        # 3D detection task
        box_type_3d='Camera'))
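# Validation runs the test pipeline with test_mode=True; the test dataloader
# and evaluator below simply reuse the validation settings.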
val_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        data_prefix=dict(img='training/image_2'),
        ann_file='kitti_infos_val.pkl',
        pipeline=test_pipeline,
        modality=input_modality,
        metainfo=metainfo,
        test_mode=True,
        box_type_3d='Camera'))
test_dataloader = val_dataloader

val_evaluator = dict(
    type='KittiMetric',
    ann_file=data_root + 'kitti_infos_val.pkl',
    metric='bbox',
    pred_box_type_3d='Camera')

test_evaluator = val_evaluator
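
# A minimal sketch of how a method-specific config could inherit this file;
# the relative path and override values are illustrative, not prescriptive:
#   _base_ = ['../_base_/datasets/kitti-mono3d.py']
#   train_dataloader = dict(batch_size=4, num_workers=4)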