# hv_pointpillars_secfpn_6x8_160e_kitti-3d-3class.py
_base_ = [
    '../_base_/models/hv_pointpillars_secfpn.py',
    '../_base_/datasets/kitti-3d-3class.py',
    '../_base_/schedules/cyclic_40e.py', '../_base_/default_runtime.py'
]
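# The base files above supply the model, dataset, schedule and runtime
# settings; every key set below overrides the corresponding merged value.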

point_cloud_range = [0, -39.68, -3, 69.12, 39.68, 1]
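# The range is [x_min, y_min, z_min, x_max, y_max, z_max] in the LiDAR frame.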
# dataset settings
data_root = 'data/kitti/'
class_names = ['Pedestrian', 'Cyclist', 'Car']
# PointPillars adopts a different sampling strategy for each class
db_sampler = dict(
    data_root=data_root,
    info_path=data_root + 'kitti_dbinfos_train.pkl',
    rate=1.0,
    object_rot_range=[0.0, 0.0],
    prepare=dict(
        filter_by_difficulty=[-1],
        filter_by_min_points=dict(Car=5, Pedestrian=10, Cyclist=10)),
    classes=class_names,
    sample_groups=dict(Car=15, Pedestrian=10, Cyclist=10))
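# ObjectSample uses this database to paste extra ground-truth boxes into each
# training scene (up to 15 Car, 10 Pedestrian and 10 Cyclist instances),
# skipping entries with too few LiDAR points or KITTI difficulty -1.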

# PointPillars uses different augmentation hyper-parameters
train_pipeline = [
    dict(type='LoadPointsFromFile', load_dim=4, use_dim=4),
    dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True),
    dict(type='ObjectSample', db_sampler=db_sampler),
    dict(
        type='ObjectNoise',
        num_try=100,
        translation_std=[0.25, 0.25, 0.25],
        global_rot_range=[0.0, 0.0],
        rot_range=[-0.15707963267, 0.15707963267]),
    dict(type='RandomFlip3D', flip_ratio=0.5),
    dict(
        type='GlobalRotScaleTrans',
        rot_range=[-0.78539816, 0.78539816],
        scale_ratio_range=[0.95, 1.05]),
    dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),
    dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),
    dict(type='PointShuffle'),
    dict(type='DefaultFormatBundle3D', class_names=class_names),
    dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])
]
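# A minimal usage sketch (comments only, not part of the config): the dataset
# builds this list into a callable with the mmdet `Compose` wrapper and
# applies it to each raw sample dict, roughly:
#
#     from mmdet.datasets.pipelines import Compose
#     pipeline = Compose(train_pipeline)
#     results = pipeline(input_dict)  # input_dict: raw sample from the dataset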
test_pipeline = [
    dict(type='LoadPointsFromFile', load_dim=4, use_dim=4),
    dict(
        type='MultiScaleFlipAug3D',
        img_scale=(1333, 800),
        pts_scale_ratio=1,
        flip=False,
        transforms=[
            dict(
                type='GlobalRotScaleTrans',
                rot_range=[0, 0],
                scale_ratio_range=[1., 1.],
                translation_std=[0, 0, 0]),
            dict(type='RandomFlip3D'),
            dict(
                type='PointsRangeFilter', point_cloud_range=point_cloud_range),
            dict(
                type='DefaultFormatBundle3D',
                class_names=class_names,
                with_label=False),
            dict(type='Collect3D', keys=['points'])
        ])
]
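# With flip=False and pts_scale_ratio=1, MultiScaleFlipAug3D performs a single
# test pass; img_scale has no effect in this LiDAR-only pipeline and is kept
# for interface compatibility.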

data = dict(
    train=dict(dataset=dict(pipeline=train_pipeline, classes=class_names)),
    val=dict(pipeline=test_pipeline, classes=class_names),
    test=dict(pipeline=test_pipeline, classes=class_names))
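# The extra dataset=dict(...) nesting for train is needed because the base
# config wraps the training set in a RepeatDataset (repeat factor 2).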

# In practice, PointPillars also uses a different schedule
# optimizer
lr = 0.001
optimizer = dict(lr=lr)
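# Only the learning rate is overridden here; the optimizer type and the
# cyclic learning-rate schedule are inherited from
# ../_base_/schedules/cyclic_40e.py.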
# max_norm=35 was slightly better than 10 for PointPillars in the earlier
# development of the codebase, so we keep the setting, but we did not
# specifically tune this parameter.
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# Use evaluation interval=2 to reduce the number of evaluation runs
evaluation = dict(interval=2)
# PointPillars usually needs a longer schedule than SECOND, so we simply
# double the training schedule. Note that since we use RepeatDataset with a
# repeat factor of 2, we actually train for 160 epochs.
total_epochs = 80
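# 80 epochs x repeat factor 2 = 160 effective epochs, matching the '160e'
# suffix in the config name.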