".github/vscode:/vscode.git/clone" did not exist on "57cc5385c0c58c1925ab1da5050aedf987b72cc0"
Unverified Commit 6c03a971 authored by Tai-Wang's avatar Tai-Wang Committed by GitHub
Browse files

Release v1.1.0rc1

Release v1.1.0rc1
parents 9611c2d0 ca42c312
...@@ -4,7 +4,7 @@ default_hooks = dict( ...@@ -4,7 +4,7 @@ default_hooks = dict(
timer=dict(type='IterTimerHook'), timer=dict(type='IterTimerHook'),
logger=dict(type='LoggerHook', interval=50), logger=dict(type='LoggerHook', interval=50),
param_scheduler=dict(type='ParamSchedulerHook'), param_scheduler=dict(type='ParamSchedulerHook'),
checkpoint=dict(type='CheckpointHook', interval=1), checkpoint=dict(type='CheckpointHook', interval=-1),
sampler_seed=dict(type='DistSamplerSeedHook'), sampler_seed=dict(type='DistSamplerSeedHook'),
visualization=dict(type='Det3DVisualizationHook')) visualization=dict(type='Det3DVisualizationHook'))
...@@ -14,9 +14,6 @@ env_cfg = dict( ...@@ -14,9 +14,6 @@ env_cfg = dict(
dist_cfg=dict(backend='nccl'), dist_cfg=dict(backend='nccl'),
) )
vis_backends = [dict(type='LocalVisBackend')]
visualizer = dict(
type='Det3DLocalVisualizer', vis_backends=vis_backends, name='visualizer')
log_processor = dict(type='LogProcessor', window_size=50, by_epoch=True) log_processor = dict(type='LogProcessor', window_size=50, by_epoch=True)
log_level = 'INFO' log_level = 'INFO'
......
model = dict(
type='MultiViewDfM',
data_preprocessor=dict(
type='Det3DDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='mmdet.ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'),
dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, False, True, True)),
neck=dict(
type='mmdet.FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=64,
num_outs=4),
neck_2d=None,
bbox_head_2d=None,
backbone_stereo=None,
depth_head=None,
backbone_3d=None,
neck_3d=dict(type='OutdoorImVoxelNeck', in_channels=64, out_channels=256),
valid_sample=True,
voxel_size=(0.5, 0.5, 0.5), # n_voxels=[240, 300, 12]
anchor_generator=dict(
type='AlignedAnchor3DRangeGenerator',
ranges=[[-35.0, -75.0, -2, 75.0, 75.0, 4]],
rotations=[.0]),
bbox_head=dict(
type='Anchor3DHead',
num_classes=3,
in_channels=256,
feat_channels=256,
use_direction_classifier=True,
anchor_generator=dict(
type='AlignedAnchor3DRangeGenerator',
ranges=[[-35.0, -75.0, -0.0345, 75.0, 75.0, -0.0345],
[-35.0, -75.0, 0, 75.0, 75.0, 0],
[-35.0, -75.0, -0.1188, 75.0, 75.0, -0.1188]],
sizes=[
[4.73, 2.08, 1.77], # car
[0.91, 0.84, 1.74], # pedestrian
[1.81, 0.84, 1.77], # cyclist
],
rotations=[0, 1.57],
reshape_out=False),
diff_rad_by_sin=True,
dir_offset=-0.7854, # -pi / 4
bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'),
loss_cls=dict(
type='mmdet.FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(
type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0),
loss_dir=dict(
type='mmdet.CrossEntropyLoss', use_sigmoid=False,
loss_weight=0.2)),
train_cfg=dict(
assigner=[
dict( # for Car
type='Max3DIoUAssigner',
iou_calculator=dict(type='BboxOverlapsNearest3D'),
pos_iou_thr=0.6,
neg_iou_thr=0.45,
min_pos_iou=0.45,
ignore_iof_thr=-1),
dict( # for Pedestrian
type='Max3DIoUAssigner',
iou_calculator=dict(type='BboxOverlapsNearest3D'),
pos_iou_thr=0.5,
neg_iou_thr=0.35,
min_pos_iou=0.35,
ignore_iof_thr=-1),
dict( # for Cyclist
type='Max3DIoUAssigner',
iou_calculator=dict(type='BboxOverlapsNearest3D'),
pos_iou_thr=0.5,
neg_iou_thr=0.35,
min_pos_iou=0.35,
ignore_iof_thr=-1),
],
allowed_border=0,
pos_weight=-1,
debug=False),
test_cfg=dict(
use_rotate_nms=True,
nms_across_levels=False,
nms_thr=0.05,
score_thr=0.001,
min_bbox_size=0,
nms_pre=500,
max_num=100))
model = dict( model = dict(
type='PointRCNN', type='PointRCNN',
data_preprocessor=dict(type='Det3DDataPreprocessor'),
backbone=dict( backbone=dict(
type='PointNet2SAMSG', type='PointNet2SAMSG',
in_channels=4, in_channels=4,
...@@ -34,14 +35,14 @@ model = dict( ...@@ -34,14 +35,14 @@ model = dict(
cls_linear_channels=(256, 256), cls_linear_channels=(256, 256),
reg_linear_channels=(256, 256)), reg_linear_channels=(256, 256)),
cls_loss=dict( cls_loss=dict(
type='FocalLoss', type='mmdet.FocalLoss',
use_sigmoid=True, use_sigmoid=True,
reduction='sum', reduction='sum',
gamma=2.0, gamma=2.0,
alpha=0.25, alpha=0.25,
loss_weight=1.0), loss_weight=1.0),
bbox_loss=dict( bbox_loss=dict(
type='SmoothL1Loss', type='mmdet.SmoothL1Loss',
beta=1.0 / 9.0, beta=1.0 / 9.0,
reduction='sum', reduction='sum',
loss_weight=1.0), loss_weight=1.0),
...@@ -55,12 +56,22 @@ model = dict( ...@@ -55,12 +56,22 @@ model = dict(
1.73]])), 1.73]])),
roi_head=dict( roi_head=dict(
type='PointRCNNRoIHead', type='PointRCNNRoIHead',
point_roi_extractor=dict( bbox_roi_extractor=dict(
type='Single3DRoIPointExtractor', type='Single3DRoIPointExtractor',
roi_layer=dict(type='RoIPointPool3d', num_sampled_points=512)), roi_layer=dict(type='RoIPointPool3d', num_sampled_points=512)),
bbox_head=dict( bbox_head=dict(
type='PointRCNNBboxHead', type='PointRCNNBboxHead',
num_classes=1, num_classes=1,
loss_bbox=dict(
type='mmdet.SmoothL1Loss',
beta=1.0 / 9.0,
reduction='sum',
loss_weight=1.0),
loss_cls=dict(
type='mmdet.CrossEntropyLoss',
use_sigmoid=True,
reduction='sum',
loss_weight=1.0),
pred_layer_cfg=dict( pred_layer_cfg=dict(
in_channels=512, in_channels=512,
cls_conv_channels=(256, 256), cls_conv_channels=(256, 256),
...@@ -79,13 +90,16 @@ model = dict( ...@@ -79,13 +90,16 @@ model = dict(
train_cfg=dict( train_cfg=dict(
pos_distance_thr=10.0, pos_distance_thr=10.0,
rpn=dict( rpn=dict(
nms_cfg=dict( rpn_proposal=dict(
use_rotate_nms=True, iou_thr=0.8, nms_pre=9000, nms_post=512), use_rotate_nms=True,
score_thr=None), score_thr=None,
iou_thr=0.8,
nms_pre=9000,
nms_post=512)),
rcnn=dict( rcnn=dict(
assigner=[ assigner=[
dict( # for Car dict( # for Pedestrian
type='MaxIoUAssigner', type='Max3DIoUAssigner',
iou_calculator=dict( iou_calculator=dict(
type='BboxOverlaps3D', coordinate='lidar'), type='BboxOverlaps3D', coordinate='lidar'),
pos_iou_thr=0.55, pos_iou_thr=0.55,
...@@ -93,8 +107,8 @@ model = dict( ...@@ -93,8 +107,8 @@ model = dict(
min_pos_iou=0.55, min_pos_iou=0.55,
ignore_iof_thr=-1, ignore_iof_thr=-1,
match_low_quality=False), match_low_quality=False),
dict( # for Pedestrian dict( # for Cyclist
type='MaxIoUAssigner', type='Max3DIoUAssigner',
iou_calculator=dict( iou_calculator=dict(
type='BboxOverlaps3D', coordinate='lidar'), type='BboxOverlaps3D', coordinate='lidar'),
pos_iou_thr=0.55, pos_iou_thr=0.55,
...@@ -102,8 +116,8 @@ model = dict( ...@@ -102,8 +116,8 @@ model = dict(
min_pos_iou=0.55, min_pos_iou=0.55,
ignore_iof_thr=-1, ignore_iof_thr=-1,
match_low_quality=False), match_low_quality=False),
dict( # for Cyclist dict( # for Car
type='MaxIoUAssigner', type='Max3DIoUAssigner',
iou_calculator=dict( iou_calculator=dict(
type='BboxOverlaps3D', coordinate='lidar'), type='BboxOverlaps3D', coordinate='lidar'),
pos_iou_thr=0.55, pos_iou_thr=0.55,
...@@ -126,6 +140,9 @@ model = dict( ...@@ -126,6 +140,9 @@ model = dict(
test_cfg=dict( test_cfg=dict(
rpn=dict( rpn=dict(
nms_cfg=dict( nms_cfg=dict(
use_rotate_nms=True, iou_thr=0.85, nms_pre=9000, nms_post=512), use_rotate_nms=True,
score_thr=None), iou_thr=0.85,
nms_pre=9000,
nms_post=512,
score_thr=None)),
rcnn=dict(use_rotate_nms=True, nms_thr=0.1, score_thr=0.1))) rcnn=dict(use_rotate_nms=True, nms_thr=0.1, score_thr=0.1)))
...@@ -22,3 +22,9 @@ param_scheduler = [ ...@@ -22,3 +22,9 @@ param_scheduler = [
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=40, val_interval=1) train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=40, val_interval=1)
val_cfg = dict(type='ValLoop') val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop') test_cfg = dict(type='TestLoop')
# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
...@@ -57,3 +57,9 @@ param_scheduler = [ ...@@ -57,3 +57,9 @@ param_scheduler = [
train_cfg = dict(by_epoch=True, max_epochs=20, val_interval=20) train_cfg = dict(by_epoch=True, max_epochs=20, val_interval=20)
val_cfg = dict() val_cfg = dict()
test_cfg = dict() test_cfg = dict()
# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (4 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=32)
...@@ -59,3 +59,9 @@ param_scheduler = [ ...@@ -59,3 +59,9 @@ param_scheduler = [
train_cfg = dict(by_epoch=True, max_epochs=40, val_interval=1) train_cfg = dict(by_epoch=True, max_epochs=40, val_interval=1)
val_cfg = dict() val_cfg = dict()
test_cfg = dict() test_cfg = dict()
# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (6 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=48)
...@@ -20,3 +20,9 @@ param_scheduler = [ ...@@ -20,3 +20,9 @@ param_scheduler = [
optim_wrapper = dict( optim_wrapper = dict(
type='OptimWrapper', type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)) optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
...@@ -28,3 +28,9 @@ param_scheduler = [ ...@@ -28,3 +28,9 @@ param_scheduler = [
milestones=[20, 23], milestones=[20, 23],
gamma=0.1) gamma=0.1)
] ]
# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (4 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=32)
...@@ -23,3 +23,9 @@ param_scheduler = [ ...@@ -23,3 +23,9 @@ param_scheduler = [
milestones=[24, 32], milestones=[24, 32],
gamma=0.1) gamma=0.1)
] ]
# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (4 GPUs) x (8 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=32)
...@@ -19,3 +19,9 @@ param_scheduler = [ ...@@ -19,3 +19,9 @@ param_scheduler = [
train_cfg = dict(by_epoch=True, max_epochs=100) train_cfg = dict(by_epoch=True, max_epochs=100)
val_cfg = dict(interval=1) val_cfg = dict(interval=1)
test_cfg = dict() test_cfg = dict()
# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (4 GPUs) x (32 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=128)
...@@ -19,3 +19,9 @@ param_scheduler = [ ...@@ -19,3 +19,9 @@ param_scheduler = [
train_cfg = dict(by_epoch=True, max_epochs=150) train_cfg = dict(by_epoch=True, max_epochs=150)
val_cfg = dict(interval=1) val_cfg = dict(interval=1)
test_cfg = dict() test_cfg = dict()
# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (8 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=64)
...@@ -19,3 +19,9 @@ param_scheduler = [ ...@@ -19,3 +19,9 @@ param_scheduler = [
train_cfg = dict(by_epoch=True, max_epochs=200) train_cfg = dict(by_epoch=True, max_epochs=200)
val_cfg = dict(interval=1) val_cfg = dict(interval=1)
test_cfg = dict() test_cfg = dict()
# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (2 GPUs) x (16 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=32)
...@@ -19,3 +19,9 @@ param_scheduler = [ ...@@ -19,3 +19,9 @@ param_scheduler = [
train_cfg = dict(by_epoch=True, max_epochs=50) train_cfg = dict(by_epoch=True, max_epochs=50)
val_cfg = dict(interval=1) val_cfg = dict(interval=1)
test_cfg = dict() test_cfg = dict()
# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (2 GPUs) x (16 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=32)
...@@ -48,14 +48,16 @@ model = dict( ...@@ -48,14 +48,16 @@ model = dict(
assign_per_class=True, assign_per_class=True,
bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'), bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'),
loss_cls=dict( loss_cls=dict(
type='FocalLoss', type='mmdet.FocalLoss',
use_sigmoid=True, use_sigmoid=True,
gamma=2.0, gamma=2.0,
alpha=0.25, alpha=0.25,
loss_weight=1.0), loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0), loss_bbox=dict(
type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0),
loss_dir=dict( loss_dir=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.2)), type='mmdet.CrossEntropyLoss', use_sigmoid=False,
loss_weight=0.2)),
roi_head=dict( roi_head=dict(
type='PartAggregationROIHead', type='PartAggregationROIHead',
num_classes=3, num_classes=3,
...@@ -66,14 +68,16 @@ model = dict( ...@@ -66,14 +68,16 @@ model = dict(
seg_score_thr=0.3, seg_score_thr=0.3,
num_classes=3, num_classes=3,
loss_seg=dict( loss_seg=dict(
type='FocalLoss', type='mmdet.FocalLoss',
use_sigmoid=True, use_sigmoid=True,
reduction='sum', reduction='sum',
gamma=2.0, gamma=2.0,
alpha=0.25, alpha=0.25,
loss_weight=1.0), loss_weight=1.0),
loss_part=dict( loss_part=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), type='mmdet.CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0)),
seg_roi_extractor=dict( seg_roi_extractor=dict(
type='Single3DRoIAwareExtractor', type='Single3DRoIAwareExtractor',
roi_layer=dict( roi_layer=dict(
...@@ -81,7 +85,7 @@ model = dict( ...@@ -81,7 +85,7 @@ model = dict(
out_size=14, out_size=14,
max_pts_per_voxel=128, max_pts_per_voxel=128,
mode='max')), mode='max')),
part_roi_extractor=dict( bbox_roi_extractor=dict(
type='Single3DRoIAwareExtractor', type='Single3DRoIAwareExtractor',
roi_layer=dict( roi_layer=dict(
type='RoIAwarePool3d', type='RoIAwarePool3d',
...@@ -105,12 +109,12 @@ model = dict( ...@@ -105,12 +109,12 @@ model = dict(
roi_feat_size=14, roi_feat_size=14,
with_corner_loss=True, with_corner_loss=True,
loss_bbox=dict( loss_bbox=dict(
type='SmoothL1Loss', type='mmdet.SmoothL1Loss',
beta=1.0 / 9.0, beta=1.0 / 9.0,
reduction='sum', reduction='sum',
loss_weight=1.0), loss_weight=1.0),
loss_cls=dict( loss_cls=dict(
type='CrossEntropyLoss', type='mmdet.CrossEntropyLoss',
use_sigmoid=True, use_sigmoid=True,
reduction='sum', reduction='sum',
loss_weight=1.0))), loss_weight=1.0))),
...@@ -119,21 +123,21 @@ model = dict( ...@@ -119,21 +123,21 @@ model = dict(
rpn=dict( rpn=dict(
assigner=[ assigner=[
dict( # for Pedestrian dict( # for Pedestrian
type='MaxIoUAssigner', type='Max3DIoUAssigner',
iou_calculator=dict(type='BboxOverlapsNearest3D'), iou_calculator=dict(type='BboxOverlapsNearest3D'),
pos_iou_thr=0.5, pos_iou_thr=0.5,
neg_iou_thr=0.35, neg_iou_thr=0.35,
min_pos_iou=0.35, min_pos_iou=0.35,
ignore_iof_thr=-1), ignore_iof_thr=-1),
dict( # for Cyclist dict( # for Cyclist
type='MaxIoUAssigner', type='Max3DIoUAssigner',
iou_calculator=dict(type='BboxOverlapsNearest3D'), iou_calculator=dict(type='BboxOverlapsNearest3D'),
pos_iou_thr=0.5, pos_iou_thr=0.5,
neg_iou_thr=0.35, neg_iou_thr=0.35,
min_pos_iou=0.35, min_pos_iou=0.35,
ignore_iof_thr=-1), ignore_iof_thr=-1),
dict( # for Car dict( # for Car
type='MaxIoUAssigner', type='Max3DIoUAssigner',
iou_calculator=dict(type='BboxOverlapsNearest3D'), iou_calculator=dict(type='BboxOverlapsNearest3D'),
pos_iou_thr=0.6, pos_iou_thr=0.6,
neg_iou_thr=0.45, neg_iou_thr=0.45,
...@@ -153,7 +157,7 @@ model = dict( ...@@ -153,7 +157,7 @@ model = dict(
rcnn=dict( rcnn=dict(
assigner=[ assigner=[
dict( # for Pedestrian dict( # for Pedestrian
type='MaxIoUAssigner', type='Max3DIoUAssigner',
iou_calculator=dict( iou_calculator=dict(
type='BboxOverlaps3D', coordinate='lidar'), type='BboxOverlaps3D', coordinate='lidar'),
pos_iou_thr=0.55, pos_iou_thr=0.55,
...@@ -161,7 +165,7 @@ model = dict( ...@@ -161,7 +165,7 @@ model = dict(
min_pos_iou=0.55, min_pos_iou=0.55,
ignore_iof_thr=-1), ignore_iof_thr=-1),
dict( # for Cyclist dict( # for Cyclist
type='MaxIoUAssigner', type='Max3DIoUAssigner',
iou_calculator=dict( iou_calculator=dict(
type='BboxOverlaps3D', coordinate='lidar'), type='BboxOverlaps3D', coordinate='lidar'),
pos_iou_thr=0.55, pos_iou_thr=0.55,
...@@ -169,7 +173,7 @@ model = dict( ...@@ -169,7 +173,7 @@ model = dict(
min_pos_iou=0.55, min_pos_iou=0.55,
ignore_iof_thr=-1), ignore_iof_thr=-1),
dict( # for Car dict( # for Car
type='MaxIoUAssigner', type='Max3DIoUAssigner',
iou_calculator=dict( iou_calculator=dict(
type='BboxOverlaps3D', coordinate='lidar'), type='BboxOverlaps3D', coordinate='lidar'),
pos_iou_thr=0.55, pos_iou_thr=0.55,
...@@ -200,12 +204,13 @@ model = dict( ...@@ -200,12 +204,13 @@ model = dict(
use_rotate_nms=True, use_rotate_nms=True,
use_raw_score=True, use_raw_score=True,
nms_thr=0.01, nms_thr=0.01,
score_thr=0.3))) score_thr=0.1)))
# dataset settings # dataset settings
dataset_type = 'KittiDataset' dataset_type = 'KittiDataset'
data_root = 'data/kitti/' data_root = 'data/kitti/'
class_names = ['Pedestrian', 'Cyclist', 'Car'] class_names = ['Pedestrian', 'Cyclist', 'Car']
metainfo = dict(CLASSES=class_names)
input_modality = dict(use_lidar=True, use_camera=False) input_modality = dict(use_lidar=True, use_camera=False)
db_sampler = dict( db_sampler = dict(
data_root=data_root, data_root=data_root,
...@@ -215,9 +220,8 @@ db_sampler = dict( ...@@ -215,9 +220,8 @@ db_sampler = dict(
filter_by_difficulty=[-1], filter_by_difficulty=[-1],
filter_by_min_points=dict(Car=5, Pedestrian=5, Cyclist=5)), filter_by_min_points=dict(Car=5, Pedestrian=5, Cyclist=5)),
classes=class_names, classes=class_names,
sample_groups=dict(Car=20, Pedestrian=15, Cyclist=15), sample_groups=dict(Car=20, Pedestrian=15, Cyclist=15))
points_loader=dict(
type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4))
train_pipeline = [ train_pipeline = [
dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4), dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4),
dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True),
...@@ -231,8 +235,9 @@ train_pipeline = [ ...@@ -231,8 +235,9 @@ train_pipeline = [
dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),
dict(type='ObjectNameFilter', classes=class_names), dict(type='ObjectNameFilter', classes=class_names),
dict(type='PointShuffle'), dict(type='PointShuffle'),
dict(type='DefaultFormatBundle3D', class_names=class_names), dict(
dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) type='Pack3DDetInputs',
keys=['points', 'gt_labels_3d', 'gt_bboxes_3d'])
] ]
test_pipeline = [ test_pipeline = [
dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4), dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4),
...@@ -249,88 +254,133 @@ test_pipeline = [ ...@@ -249,88 +254,133 @@ test_pipeline = [
translation_std=[0, 0, 0]), translation_std=[0, 0, 0]),
dict(type='RandomFlip3D'), dict(type='RandomFlip3D'),
dict( dict(
type='PointsRangeFilter', point_cloud_range=point_cloud_range), type='PointsRangeFilter', point_cloud_range=point_cloud_range)
dict( ]),
type='DefaultFormatBundle3D', dict(type='Pack3DDetInputs', keys=['points'])
class_names=class_names,
with_label=False),
dict(type='Collect3D', keys=['points'])
])
] ]
# construct a pipeline for data and gt loading in show function # construct a pipeline for data and gt loading in show function
# please keep its loading function consistent with test_pipeline (e.g. client) # please keep its loading function consistent with test_pipeline (e.g. client)
eval_pipeline = [ eval_pipeline = [
dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4), dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4),
dict( dict(type='Pack3DDetInputs', keys=['points'])
type='DefaultFormatBundle3D',
class_names=class_names,
with_label=False),
dict(type='Collect3D', keys=['points'])
] ]
data = dict( train_dataloader = dict(
samples_per_gpu=4, batch_size=4,
workers_per_gpu=4, num_workers=4,
train=dict( persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
dataset=dict(
type=dataset_type, type=dataset_type,
data_root=data_root, data_root=data_root,
ann_file=data_root + 'kitti_infos_train.pkl', ann_file='kitti_infos_train.pkl',
split='training', data_prefix=dict(pts='training/velodyne_reduced'),
pts_prefix='velodyne_reduced',
pipeline=train_pipeline, pipeline=train_pipeline,
modality=input_modality, modality=input_modality,
classes=class_names, test_mode=False,
test_mode=False), metainfo=metainfo,
val=dict( # we use box_type_3d='LiDAR' in kitti and nuscenes dataset
# and box_type_3d='Depth' in sunrgbd and scannet dataset.
box_type_3d='LiDAR'))
val_dataloader = dict(
batch_size=1,
num_workers=1,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type, type=dataset_type,
data_root=data_root, data_root=data_root,
ann_file=data_root + 'kitti_infos_val.pkl', data_prefix=dict(pts='training/velodyne_reduced'),
split='training', ann_file='kitti_infos_val.pkl',
pts_prefix='velodyne_reduced',
pipeline=test_pipeline, pipeline=test_pipeline,
modality=input_modality, modality=input_modality,
classes=class_names, test_mode=True,
test_mode=True), metainfo=metainfo,
test=dict( box_type_3d='LiDAR'))
type=dataset_type, test_dataloader = val_dataloader
data_root=data_root,
val_evaluator = dict(
type='KittiMetric',
ann_file=data_root + 'kitti_infos_val.pkl', ann_file=data_root + 'kitti_infos_val.pkl',
split='training', metric='bbox')
pts_prefix='velodyne_reduced', test_evaluator = val_evaluator
pipeline=test_pipeline,
modality=input_modality,
classes=class_names,
test_mode=True))
# optimizer # optimizer
lr = 0.001 # max learning rate lr = 0.001 # max learning rate
optimizer = dict(type='AdamW', lr=lr, betas=(0.95, 0.99), weight_decay=0.01) epoch_num = 80
optimizer_config = dict(grad_clip=dict(max_norm=10, norm_type=2)) optim_wrapper = dict(
lr_config = dict( type='OptimWrapper',
policy='cyclic', optimizer=dict(type='AdamW', lr=lr, betas=(0.95, 0.99), weight_decay=0.01),
target_ratio=(10, 1e-4), clip_grad=dict(max_norm=10, norm_type=2))
cyclic_times=1,
step_ratio_up=0.4) # learning policy
momentum_config = dict( param_scheduler = [
policy='cyclic', dict(
target_ratio=(0.85 / 0.95, 1), type='CosineAnnealingLR',
cyclic_times=1, T_max=epoch_num * 0.4,
step_ratio_up=0.4) eta_min=lr * 10,
checkpoint_config = dict(interval=1) begin=0,
evaluation = dict(interval=1, pipeline=eval_pipeline) end=epoch_num * 0.4,
# yapf:disable by_epoch=True,
log_config = dict( convert_to_iter_based=True),
interval=50, dict(
hooks=[ type='CosineAnnealingLR',
dict(type='TextLoggerHook'), T_max=epoch_num * 0.6,
dict(type='TensorboardLoggerHook') eta_min=lr * 1e-4,
]) begin=epoch_num * 0.4,
# yapf:enable end=epoch_num * 1,
# runtime settings by_epoch=True,
runner = dict(type='EpochBasedRunner', max_epochs=80) convert_to_iter_based=True),
dist_params = dict(backend='nccl', port=29506) dict(
type='CosineAnnealingMomentum',
T_max=epoch_num * 0.4,
eta_min=0.85 / 0.95,
begin=0,
end=epoch_num * 0.4,
by_epoch=True,
convert_to_iter_based=True),
dict(
type='CosineAnnealingMomentum',
T_max=epoch_num * 0.6,
eta_min=1,
begin=epoch_num * 0.4,
end=epoch_num * 1,
convert_to_iter_based=True)
]
train_cfg = dict(by_epoch=True, max_epochs=epoch_num, val_interval=50)
val_cfg = dict()
test_cfg = dict()
auto_scale_lr = dict(enable=False, base_batch_size=32)
default_scope = 'mmdet3d'
default_hooks = dict(
timer=dict(type='IterTimerHook'),
logger=dict(type='LoggerHook', interval=50),
param_scheduler=dict(type='ParamSchedulerHook'),
checkpoint=dict(type='CheckpointHook', interval=1),
sampler_seed=dict(type='DistSamplerSeedHook'),
visualization=dict(type='Det3DVisualizationHook'))
custom_hooks = [
dict(type='BenchmarkHook'),
]
env_cfg = dict(
cudnn_benchmark=False,
mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
dist_cfg=dict(backend='nccl'),
)
vis_backends = [dict(type='LocalVisBackend')]
visualizer = dict(
type='Det3DLocalVisualizer', vis_backends=vis_backends, name='visualizer')
log_processor = dict(type='LogProcessor', window_size=50, by_epoch=True)
log_level = 'INFO' log_level = 'INFO'
load_from = None
resume = False
find_unused_parameters = True find_unused_parameters = True
work_dir = './work_dirs/parta2_secfpn_80e' work_dir = './work_dirs/parta2_secfpn_80e'
load_from = None
resume_from = None
workflow = [('train', 1)]
...@@ -46,18 +46,20 @@ model = dict( ...@@ -46,18 +46,20 @@ model = dict(
diff_rad_by_sin=True, diff_rad_by_sin=True,
bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'), bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'),
loss_cls=dict( loss_cls=dict(
type='FocalLoss', type='mmdet.FocalLoss',
use_sigmoid=True, use_sigmoid=True,
gamma=2.0, gamma=2.0,
alpha=0.25, alpha=0.25,
loss_weight=1.0), loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0), loss_bbox=dict(
type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0),
loss_dir=dict( loss_dir=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.2)), type='mmdet.CrossEntropyLoss', use_sigmoid=False,
loss_weight=0.2)),
# model training and testing settings # model training and testing settings
train_cfg=dict( train_cfg=dict(
assigner=dict( assigner=dict(
type='MaxIoUAssigner', type='Max3DIoUAssigner',
iou_calculator=dict(type='BboxOverlapsNearest3D'), iou_calculator=dict(type='BboxOverlapsNearest3D'),
pos_iou_thr=0.6, pos_iou_thr=0.6,
neg_iou_thr=0.45, neg_iou_thr=0.45,
...@@ -79,6 +81,7 @@ model = dict( ...@@ -79,6 +81,7 @@ model = dict(
dataset_type = 'KittiDataset' dataset_type = 'KittiDataset'
data_root = 'data/kitti/' data_root = 'data/kitti/'
class_names = ['Car'] class_names = ['Car']
metainfo = dict(CLASSES=class_names)
input_modality = dict(use_lidar=True, use_camera=False) input_modality = dict(use_lidar=True, use_camera=False)
db_sampler = dict( db_sampler = dict(
data_root=data_root, data_root=data_root,
...@@ -86,9 +89,7 @@ db_sampler = dict( ...@@ -86,9 +89,7 @@ db_sampler = dict(
rate=1.0, rate=1.0,
prepare=dict(filter_by_difficulty=[-1], filter_by_min_points=dict(Car=5)), prepare=dict(filter_by_difficulty=[-1], filter_by_min_points=dict(Car=5)),
sample_groups=dict(Car=15), sample_groups=dict(Car=15),
classes=class_names, classes=class_names)
points_loader=dict(
type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4))
train_pipeline = [ train_pipeline = [
dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4), dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4),
...@@ -108,99 +109,140 @@ train_pipeline = [ ...@@ -108,99 +109,140 @@ train_pipeline = [
dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),
dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),
dict(type='PointShuffle'), dict(type='PointShuffle'),
dict(type='DefaultFormatBundle3D', class_names=class_names), dict(
dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) type='Pack3DDetInputs',
keys=['points', 'gt_labels_3d', 'gt_bboxes_3d'])
] ]
test_pipeline = [ test_pipeline = [
dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4), dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4),
dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),
dict( dict(type='Pack3DDetInputs', keys=['points'])
type='DefaultFormatBundle3D',
class_names=class_names,
with_label=False),
dict(type='Collect3D', keys=['points'])
] ]
# construct a pipeline for data and gt loading in show function # construct a pipeline for data and gt loading in show function
# please keep its loading function consistent with test_pipeline (e.g. client) # please keep its loading function consistent with test_pipeline (e.g. client)
eval_pipeline = [ eval_pipeline = [
dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4), dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4),
dict( dict(type='Pack3DDetInputs', keys=['points'])
type='DefaultFormatBundle3D',
class_names=class_names,
with_label=False),
dict(type='Collect3D', keys=['points'])
] ]
data = dict( train_dataloader = dict(
samples_per_gpu=3, batch_size=3,
workers_per_gpu=3, num_workers=3,
train=dict( persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
dataset=dict(
type='RepeatDataset', type='RepeatDataset',
times=2, times=2,
dataset=dict( dataset=dict(
type=dataset_type, type=dataset_type,
data_root=data_root, data_root=data_root,
ann_file=data_root + 'kitti_infos_train.pkl', ann_file='kitti_infos_train.pkl',
split='training', data_prefix=dict(pts='training/velodyne_reduced'),
pts_prefix='velodyne_reduced',
pipeline=train_pipeline, pipeline=train_pipeline,
modality=input_modality, modality=input_modality,
classes=class_names, test_mode=False,
test_mode=False)), metainfo=metainfo,
val=dict( # we use box_type_3d='LiDAR' in kitti and nuscenes dataset
# and box_type_3d='Depth' in sunrgbd and scannet dataset.
box_type_3d='LiDAR')))
val_dataloader = dict(
batch_size=1,
num_workers=1,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type, type=dataset_type,
data_root=data_root, data_root=data_root,
ann_file=data_root + 'kitti_infos_val.pkl', data_prefix=dict(pts='training/velodyne_reduced'),
split='training', ann_file='kitti_infos_val.pkl',
pts_prefix='velodyne_reduced',
pipeline=test_pipeline, pipeline=test_pipeline,
modality=input_modality, modality=input_modality,
classes=class_names, test_mode=True,
test_mode=True), metainfo=metainfo,
test=dict( box_type_3d='LiDAR'))
type=dataset_type, test_dataloader = val_dataloader
data_root=data_root,
val_evaluator = dict(
type='KittiMetric',
ann_file=data_root + 'kitti_infos_val.pkl', ann_file=data_root + 'kitti_infos_val.pkl',
split='training', metric='bbox')
pts_prefix='velodyne_reduced', test_evaluator = val_evaluator
pipeline=test_pipeline,
modality=input_modality,
classes=class_names,
test_mode=True))
# optimizer # optimizer
lr = 0.001 # max learning rate lr = 0.001 # max learning rate
optimizer = dict( epoch_num = 50
type='AdamW', optim_wrapper = dict(
lr=lr, type='OptimWrapper',
betas=(0.95, 0.99), # the momentum is change during training optimizer=dict(type='AdamW', lr=lr, betas=(0.95, 0.99), weight_decay=0.01),
weight_decay=0.01) clip_grad=dict(max_norm=10, norm_type=2))
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy # learning policy
lr_config = dict( param_scheduler = [
policy='cyclic', dict(
target_ratio=(10, 1e-4), type='CosineAnnealingLR',
cyclic_times=1, T_max=epoch_num * 0.4,
step_ratio_up=0.4) eta_min=lr * 10,
momentum_config = dict( begin=0,
policy='cyclic', end=epoch_num * 0.4,
target_ratio=(0.85 / 0.95, 1), by_epoch=True,
cyclic_times=1, convert_to_iter_based=True),
step_ratio_up=0.4) dict(
checkpoint_config = dict(interval=1) type='CosineAnnealingLR',
evaluation = dict(interval=1, pipeline=eval_pipeline) T_max=epoch_num * 0.6,
# yapf:disable eta_min=lr * 1e-4,
log_config = dict( begin=epoch_num * 0.4,
interval=50, end=epoch_num * 1,
hooks=[ by_epoch=True,
dict(type='TextLoggerHook'), convert_to_iter_based=True),
dict(type='TensorboardLoggerHook') dict(
]) type='CosineAnnealingMomentum',
# yapf:enable T_max=epoch_num * 0.4,
# runtime settings eta_min=0.85 / 0.95,
runner = dict(type='EpochBasedRunner', max_epochs=50) begin=0,
dist_params = dict(backend='nccl') end=epoch_num * 0.4,
by_epoch=True,
convert_to_iter_based=True),
dict(
type='CosineAnnealingMomentum',
T_max=epoch_num * 0.6,
eta_min=1,
begin=epoch_num * 0.4,
end=epoch_num * 1,
convert_to_iter_based=True)
]
# Epoch-based training loop; total length comes from `epoch_num` defined above.
# val_interval=50 triggers validation only every 50 epochs (benchmark-style run).
train_cfg = dict(by_epoch=True, max_epochs=epoch_num, val_interval=50)
val_cfg = dict()
test_cfg = dict()
# Automatic LR scaling is disabled; base_batch_size records the total batch
# size the base learning rate was tuned for (presumably 6 GPUs x 4 samples —
# TODO confirm against the model zoo entry).
auto_scale_lr = dict(enable=False, base_batch_size=24)
default_scope = 'mmdet3d'
# Default runtime hooks: iteration timing, logging every 50 iters, LR/momentum
# scheduling, checkpointing (interval=1), distributed sampler re-seeding, and
# detection visualization.
default_hooks = dict(
    timer=dict(type='IterTimerHook'),
    logger=dict(type='LoggerHook', interval=50),
    param_scheduler=dict(type='ParamSchedulerHook'),
    checkpoint=dict(type='CheckpointHook', interval=1),
    sampler_seed=dict(type='DistSamplerSeedHook'),
    visualization=dict(type='Det3DVisualizationHook'))
# BenchmarkHook suggests this config is used for speed benchmarking.
custom_hooks = [
    dict(type='BenchmarkHook'),
]
env_cfg = dict(
    cudnn_benchmark=False,
    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
    dist_cfg=dict(backend='nccl'),
)
# Local (filesystem) visualization backend for the Det3D visualizer.
vis_backends = [dict(type='LocalVisBackend')]
visualizer = dict(
    type='Det3DLocalVisualizer', vis_backends=vis_backends, name='visualizer')
# Smooth logged scalars over a 50-iteration window.
log_processor = dict(type='LogProcessor', window_size=50, by_epoch=True)
log_level = 'INFO' log_level = 'INFO'
work_dir = './work_dirs/pp_secfpn_100e'
load_from = None load_from = None
resume_from = None resume = False
workflow = [('train', 50)] work_dir = './work_dirs/pp_secfpn_100e'
...@@ -56,34 +56,35 @@ model = dict( ...@@ -56,34 +56,35 @@ model = dict(
diff_rad_by_sin=True, diff_rad_by_sin=True,
bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'), bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'),
loss_cls=dict( loss_cls=dict(
type='FocalLoss', type='mmdet.FocalLoss',
use_sigmoid=True, use_sigmoid=True,
gamma=2.0, gamma=2.0,
alpha=0.25, alpha=0.25,
loss_weight=1.0), loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0), loss_bbox=dict(
type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0),
loss_dir=dict( loss_dir=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.2), type='mmdet.CrossEntropyLoss', use_sigmoid=False, loss_weight=0.2),
), ),
# model training and testing settings # model training and testing settings
train_cfg=dict( train_cfg=dict(
assigner=[ assigner=[
dict( # for Pedestrian dict( # for Pedestrian
type='MaxIoUAssigner', type='Max3DIoUAssigner',
iou_calculator=dict(type='BboxOverlapsNearest3D'), iou_calculator=dict(type='BboxOverlapsNearest3D'),
pos_iou_thr=0.5, pos_iou_thr=0.5,
neg_iou_thr=0.35, neg_iou_thr=0.35,
min_pos_iou=0.35, min_pos_iou=0.35,
ignore_iof_thr=-1), ignore_iof_thr=-1),
dict( # for Cyclist dict( # for Cyclist
type='MaxIoUAssigner', type='Max3DIoUAssigner',
iou_calculator=dict(type='BboxOverlapsNearest3D'), iou_calculator=dict(type='BboxOverlapsNearest3D'),
pos_iou_thr=0.5, pos_iou_thr=0.5,
neg_iou_thr=0.35, neg_iou_thr=0.35,
min_pos_iou=0.35, min_pos_iou=0.35,
ignore_iof_thr=-1), ignore_iof_thr=-1),
dict( # for Car dict( # for Car
type='MaxIoUAssigner', type='Max3DIoUAssigner',
iou_calculator=dict(type='BboxOverlapsNearest3D'), iou_calculator=dict(type='BboxOverlapsNearest3D'),
pos_iou_thr=0.6, pos_iou_thr=0.6,
neg_iou_thr=0.45, neg_iou_thr=0.45,
...@@ -106,6 +107,8 @@ model = dict( ...@@ -106,6 +107,8 @@ model = dict(
dataset_type = 'KittiDataset' dataset_type = 'KittiDataset'
data_root = 'data/kitti/' data_root = 'data/kitti/'
class_names = ['Pedestrian', 'Cyclist', 'Car'] class_names = ['Pedestrian', 'Cyclist', 'Car']
metainfo = dict(CLASSES=class_names)
input_modality = dict(use_lidar=True, use_camera=False) input_modality = dict(use_lidar=True, use_camera=False)
db_sampler = dict( db_sampler = dict(
data_root=data_root, data_root=data_root,
...@@ -123,9 +126,7 @@ db_sampler = dict( ...@@ -123,9 +126,7 @@ db_sampler = dict(
Car=15, Car=15,
Pedestrian=15, Pedestrian=15,
Cyclist=15, Cyclist=15,
), ))
points_loader=dict(
type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4))
train_pipeline = [ train_pipeline = [
dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4), dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4),
...@@ -139,8 +140,9 @@ train_pipeline = [ ...@@ -139,8 +140,9 @@ train_pipeline = [
dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),
dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),
dict(type='PointShuffle'), dict(type='PointShuffle'),
dict(type='DefaultFormatBundle3D', class_names=class_names), dict(
dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']), type='Pack3DDetInputs',
keys=['points', 'gt_labels_3d', 'gt_bboxes_3d'])
] ]
test_pipeline = [ test_pipeline = [
dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4), dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4),
...@@ -158,91 +160,132 @@ test_pipeline = [ ...@@ -158,91 +160,132 @@ test_pipeline = [
dict(type='RandomFlip3D'), dict(type='RandomFlip3D'),
dict( dict(
type='PointsRangeFilter', point_cloud_range=point_cloud_range), type='PointsRangeFilter', point_cloud_range=point_cloud_range),
dict( ]),
type='DefaultFormatBundle3D', dict(type='Pack3DDetInputs', keys=['points'])
class_names=class_names,
with_label=False),
dict(type='Collect3D', keys=['points'])
])
] ]
# construct a pipeline for data and gt loading in show function # construct a pipeline for data and gt loading in show function
# please keep its loading function consistent with test_pipeline (e.g. client) # please keep its loading function consistent with test_pipeline (e.g. client)
eval_pipeline = [ eval_pipeline = [
dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4), dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4),
dict( dict(type='Pack3DDetInputs', keys=['points'])
type='DefaultFormatBundle3D',
class_names=class_names,
with_label=False),
dict(type='Collect3D', keys=['points'])
] ]
data = dict( train_dataloader = dict(
samples_per_gpu=4, batch_size=4,
workers_per_gpu=4, num_workers=4,
train=dict( persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
dataset=dict(
type=dataset_type, type=dataset_type,
data_root=data_root, data_root=data_root,
ann_file=data_root + 'kitti_infos_train.pkl', ann_file='kitti_infos_train.pkl',
split='training', data_prefix=dict(pts='training/velodyne_reduced'),
pts_prefix='velodyne_reduced',
pipeline=train_pipeline, pipeline=train_pipeline,
modality=input_modality, modality=input_modality,
classes=class_names, test_mode=False,
test_mode=False), metainfo=metainfo,
val=dict( # we use box_type_3d='LiDAR' in kitti and nuscenes dataset
# and box_type_3d='Depth' in sunrgbd and scannet dataset.
box_type_3d='LiDAR'))
val_dataloader = dict(
batch_size=1,
num_workers=1,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type, type=dataset_type,
data_root=data_root, data_root=data_root,
ann_file=data_root + 'kitti_infos_val.pkl', data_prefix=dict(pts='training/velodyne_reduced'),
split='training', ann_file='kitti_infos_val.pkl',
pts_prefix='velodyne_reduced',
pipeline=test_pipeline, pipeline=test_pipeline,
modality=input_modality, modality=input_modality,
classes=class_names, test_mode=True,
test_mode=True), metainfo=metainfo,
test=dict( box_type_3d='LiDAR'))
type=dataset_type, test_dataloader = val_dataloader
data_root=data_root,
val_evaluator = dict(
type='KittiMetric',
ann_file=data_root + 'kitti_infos_val.pkl', ann_file=data_root + 'kitti_infos_val.pkl',
split='training', metric='bbox')
pts_prefix='velodyne_reduced', test_evaluator = val_evaluator
pipeline=test_pipeline,
modality=input_modality,
classes=class_names,
test_mode=True))
# optimizer # optimizer
lr = 0.0003 # max learning rate lr = 0.0003 # max learning rate
optimizer = dict( epoch_num = 80
type='AdamW', optim_wrapper = dict(
lr=lr, type='OptimWrapper',
betas=(0.95, 0.99), # the momentum is change during training optimizer=dict(type='AdamW', lr=lr, betas=(0.95, 0.99), weight_decay=0.01),
weight_decay=0.01) clip_grad=dict(max_norm=10, norm_type=2))
optimizer_config = dict(grad_clip=dict(max_norm=10, norm_type=2))
# learning policy # learning policy
lr_config = dict( param_scheduler = [
policy='cyclic', dict(
target_ratio=(10, 1e-4), type='CosineAnnealingLR',
cyclic_times=1, T_max=epoch_num * 0.4,
step_ratio_up=0.4) eta_min=lr * 10,
momentum_config = dict( begin=0,
policy='cyclic', end=epoch_num * 0.4,
target_ratio=(0.85 / 0.95, 1), by_epoch=True,
cyclic_times=1, convert_to_iter_based=True),
step_ratio_up=0.4) dict(
checkpoint_config = dict(interval=1) type='CosineAnnealingLR',
evaluation = dict(interval=2, pipeline=eval_pipeline) T_max=epoch_num * 0.6,
# yapf:disable eta_min=lr * 1e-4,
log_config = dict( begin=epoch_num * 0.4,
interval=50, end=epoch_num * 1,
hooks=[ by_epoch=True,
dict(type='TextLoggerHook'), convert_to_iter_based=True),
dict(type='TensorboardLoggerHook') dict(
]) type='CosineAnnealingMomentum',
# yapf:enable T_max=epoch_num * 0.4,
# runtime settings eta_min=0.85 / 0.95,
runner = dict(type='EpochBasedRunner', max_epochs=80) begin=0,
dist_params = dict(backend='nccl') end=epoch_num * 0.4,
by_epoch=True,
convert_to_iter_based=True),
dict(
type='CosineAnnealingMomentum',
T_max=epoch_num * 0.6,
eta_min=1,
begin=epoch_num * 0.4,
end=epoch_num * 1,
convert_to_iter_based=True)
]
train_cfg = dict(by_epoch=True, max_epochs=epoch_num, val_interval=50)
val_cfg = dict()
test_cfg = dict()
auto_scale_lr = dict(enable=False, base_batch_size=32)
default_scope = 'mmdet3d'
default_hooks = dict(
timer=dict(type='IterTimerHook'),
logger=dict(type='LoggerHook', interval=50),
param_scheduler=dict(type='ParamSchedulerHook'),
checkpoint=dict(type='CheckpointHook', interval=1),
sampler_seed=dict(type='DistSamplerSeedHook'),
visualization=dict(type='Det3DVisualizationHook'))
custom_hooks = [
dict(type='BenchmarkHook'),
]
env_cfg = dict(
cudnn_benchmark=False,
mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
dist_cfg=dict(backend='nccl'),
)
vis_backends = [dict(type='LocalVisBackend')]
visualizer = dict(
type='Det3DLocalVisualizer', vis_backends=vis_backends, name='visualizer')
log_processor = dict(type='LogProcessor', window_size=50, by_epoch=True)
log_level = 'INFO' log_level = 'INFO'
work_dir = './work_dirs/pp_secfpn_80e'
load_from = None load_from = None
resume_from = None resume = False
workflow = [('train', 1)] work_dir = './work_dirs/pp_secfpn_80e'
...@@ -48,33 +48,35 @@ model = dict( ...@@ -48,33 +48,35 @@ model = dict(
diff_rad_by_sin=True, diff_rad_by_sin=True,
bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'), bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'),
loss_cls=dict( loss_cls=dict(
type='FocalLoss', type='mmdet.FocalLoss',
use_sigmoid=True, use_sigmoid=True,
gamma=2.0, gamma=2.0,
alpha=0.25, alpha=0.25,
loss_weight=1.0), loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0), loss_bbox=dict(
type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0),
loss_dir=dict( loss_dir=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.2)), type='mmdet.CrossEntropyLoss', use_sigmoid=False,
loss_weight=0.2)),
# model training and testing settings # model training and testing settings
train_cfg=dict( train_cfg=dict(
assigner=[ assigner=[
dict( # for Pedestrian dict( # for Pedestrian
type='MaxIoUAssigner', type='Max3DIoUAssigner',
iou_calculator=dict(type='BboxOverlapsNearest3D'), iou_calculator=dict(type='BboxOverlapsNearest3D'),
pos_iou_thr=0.5, pos_iou_thr=0.5,
neg_iou_thr=0.35, neg_iou_thr=0.35,
min_pos_iou=0.35, min_pos_iou=0.35,
ignore_iof_thr=-1), ignore_iof_thr=-1),
dict( # for Cyclist dict( # for Cyclist
type='MaxIoUAssigner', type='Max3DIoUAssigner',
iou_calculator=dict(type='BboxOverlapsNearest3D'), iou_calculator=dict(type='BboxOverlapsNearest3D'),
pos_iou_thr=0.5, pos_iou_thr=0.5,
neg_iou_thr=0.35, neg_iou_thr=0.35,
min_pos_iou=0.35, min_pos_iou=0.35,
ignore_iof_thr=-1), ignore_iof_thr=-1),
dict( # for Car dict( # for Car
type='MaxIoUAssigner', type='Max3DIoUAssigner',
iou_calculator=dict(type='BboxOverlapsNearest3D'), iou_calculator=dict(type='BboxOverlapsNearest3D'),
pos_iou_thr=0.6, pos_iou_thr=0.6,
neg_iou_thr=0.45, neg_iou_thr=0.45,
...@@ -97,7 +99,8 @@ model = dict( ...@@ -97,7 +99,8 @@ model = dict(
dataset_type = 'KittiDataset' dataset_type = 'KittiDataset'
data_root = 'data/kitti/' data_root = 'data/kitti/'
class_names = ['Pedestrian', 'Cyclist', 'Car'] class_names = ['Pedestrian', 'Cyclist', 'Car']
input_modality = dict(use_lidar=False, use_camera=False) metainfo = dict(CLASSES=class_names)
input_modality = dict(use_lidar=True, use_camera=False)
db_sampler = dict( db_sampler = dict(
data_root=data_root, data_root=data_root,
info_path=data_root + 'kitti_dbinfos_train.pkl', info_path=data_root + 'kitti_dbinfos_train.pkl',
...@@ -114,12 +117,7 @@ db_sampler = dict( ...@@ -114,12 +117,7 @@ db_sampler = dict(
Car=20, Car=20,
Pedestrian=15, Pedestrian=15,
Cyclist=15, Cyclist=15,
), ))
points_loader=dict(
type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4))
file_client_args = dict(backend='disk')
# file_client_args = dict(
# backend='petrel', path_mapping=dict(data='s3://kitti_data/'))
train_pipeline = [ train_pipeline = [
dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4), dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4),
...@@ -133,8 +131,9 @@ train_pipeline = [ ...@@ -133,8 +131,9 @@ train_pipeline = [
dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),
dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),
dict(type='PointShuffle'), dict(type='PointShuffle'),
dict(type='DefaultFormatBundle3D', class_names=class_names), dict(
dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) type='Pack3DDetInputs',
keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])
] ]
test_pipeline = [ test_pipeline = [
dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4), dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4),
...@@ -151,87 +150,132 @@ test_pipeline = [ ...@@ -151,87 +150,132 @@ test_pipeline = [
translation_std=[0, 0, 0]), translation_std=[0, 0, 0]),
dict(type='RandomFlip3D'), dict(type='RandomFlip3D'),
dict( dict(
type='PointsRangeFilter', point_cloud_range=point_cloud_range), type='PointsRangeFilter', point_cloud_range=point_cloud_range)
dict( ]),
type='DefaultFormatBundle3D', dict(type='Pack3DDetInputs', keys=['points'])
class_names=class_names,
with_label=False),
dict(type='Collect3D', keys=['points'])
])
] ]
# construct a pipeline for data and gt loading in show function # construct a pipeline for data and gt loading in show function
# please keep its loading function consistent with test_pipeline (e.g. client) # please keep its loading function consistent with test_pipeline (e.g. client)
eval_pipeline = [ eval_pipeline = [
dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4), dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4),
dict( dict(type='Pack3DDetInputs', keys=['points'])
type='DefaultFormatBundle3D',
class_names=class_names,
with_label=False),
dict(type='Collect3D', keys=['points'])
] ]
data = dict( train_dataloader = dict(
samples_per_gpu=4, batch_size=4,
workers_per_gpu=4, num_workers=4,
train=dict( persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
dataset=dict(
type=dataset_type, type=dataset_type,
data_root=data_root, data_root=data_root,
ann_file=data_root + 'kitti_infos_train.pkl', ann_file='kitti_infos_train.pkl',
split='training', data_prefix=dict(pts='training/velodyne_reduced'),
pts_prefix='velodyne_reduced',
pipeline=train_pipeline, pipeline=train_pipeline,
modality=input_modality, modality=input_modality,
classes=class_names, test_mode=False,
test_mode=False), metainfo=metainfo,
val=dict( # we use box_type_3d='LiDAR' in kitti and nuscenes dataset
# and box_type_3d='Depth' in sunrgbd and scannet dataset.
box_type_3d='LiDAR'))
val_dataloader = dict(
batch_size=1,
num_workers=1,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type, type=dataset_type,
data_root=data_root, data_root=data_root,
ann_file=data_root + 'kitti_infos_val.pkl', data_prefix=dict(pts='training/velodyne_reduced'),
split='training', ann_file='kitti_infos_val.pkl',
pts_prefix='velodyne_reduced',
pipeline=test_pipeline, pipeline=test_pipeline,
modality=input_modality, modality=input_modality,
classes=class_names, test_mode=True,
test_mode=True), metainfo=metainfo,
test=dict( box_type_3d='LiDAR'))
type=dataset_type, test_dataloader = val_dataloader
data_root=data_root,
val_evaluator = dict(
type='KittiMetric',
ann_file=data_root + 'kitti_infos_val.pkl', ann_file=data_root + 'kitti_infos_val.pkl',
split='training', metric='bbox')
pts_prefix='velodyne_reduced', test_evaluator = val_evaluator
pipeline=test_pipeline,
modality=input_modality,
classes=class_names,
test_mode=True))
# optimizer # optimizer
lr = 0.0003 # max learning rate lr = 0.0003 # max learning rate
optimizer = dict(type='AdamW', lr=lr, betas=(0.95, 0.99), weight_decay=0.01) epoch_num = 80
optimizer_config = dict(grad_clip=dict(max_norm=10, norm_type=2)) optim_wrapper = dict(
lr_config = dict( type='OptimWrapper',
policy='cyclic', optimizer=dict(type='AdamW', lr=lr, betas=(0.95, 0.99), weight_decay=0.01),
target_ratio=(10, 1e-4), clip_grad=dict(max_norm=10, norm_type=2))
cyclic_times=1,
step_ratio_up=0.4) # learning policy
momentum_config = dict( param_scheduler = [
policy='cyclic', dict(
target_ratio=(0.85 / 0.95, 1), type='CosineAnnealingLR',
cyclic_times=1, T_max=epoch_num * 0.4,
step_ratio_up=0.4) eta_min=lr * 10,
checkpoint_config = dict(interval=1) begin=0,
evaluation = dict(interval=2, pipeline=eval_pipeline) end=epoch_num * 0.4,
# yapf:disable by_epoch=True,
log_config = dict( convert_to_iter_based=True),
interval=50, dict(
hooks=[ type='CosineAnnealingLR',
dict(type='TextLoggerHook'), T_max=epoch_num * 0.6,
dict(type='TensorboardLoggerHook') eta_min=lr * 1e-4,
]) begin=epoch_num * 0.4,
# yapf:enable end=epoch_num * 1,
# runtime settings by_epoch=True,
runner = dict(type='EpochBasedRunner', max_epochs=80) convert_to_iter_based=True),
dist_params = dict(backend='nccl') dict(
type='CosineAnnealingMomentum',
T_max=epoch_num * 0.4,
eta_min=0.85 / 0.95,
begin=0,
end=epoch_num * 0.4,
by_epoch=True,
convert_to_iter_based=True),
dict(
type='CosineAnnealingMomentum',
T_max=epoch_num * 0.6,
eta_min=1,
begin=epoch_num * 0.4,
end=epoch_num * 1,
convert_to_iter_based=True)
]
# Epoch-based training loop; total length comes from `epoch_num` defined above.
# val_interval=50 means validation fires only once (epoch 50) in an 80-epoch
# run — benchmark-style configuration.
train_cfg = dict(by_epoch=True, max_epochs=epoch_num, val_interval=50)
val_cfg = dict()
test_cfg = dict()
# Automatic LR scaling disabled; base_batch_size records the total batch size
# the base LR was tuned for (8 GPUs x 4 samples, presumably — TODO confirm).
auto_scale_lr = dict(enable=False, base_batch_size=32)
default_scope = 'mmdet3d'
# Default runtime hooks: iteration timing, logging every 50 iters, LR/momentum
# scheduling, checkpointing (interval=1), distributed sampler re-seeding, and
# detection visualization.
default_hooks = dict(
    timer=dict(type='IterTimerHook'),
    logger=dict(type='LoggerHook', interval=50),
    param_scheduler=dict(type='ParamSchedulerHook'),
    checkpoint=dict(type='CheckpointHook', interval=1),
    sampler_seed=dict(type='DistSamplerSeedHook'),
    visualization=dict(type='Det3DVisualizationHook'))
# BenchmarkHook suggests this config is used for speed benchmarking.
custom_hooks = [
    dict(type='BenchmarkHook'),
]
env_cfg = dict(
    cudnn_benchmark=False,
    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
    dist_cfg=dict(backend='nccl'),
)
# Local (filesystem) visualization backend for the Det3D visualizer.
vis_backends = [dict(type='LocalVisBackend')]
visualizer = dict(
    type='Det3DLocalVisualizer', vis_backends=vis_backends, name='visualizer')
# Smooth logged scalars over a 50-iteration window.
log_processor = dict(type='LogProcessor', window_size=50, by_epoch=True)
log_level = 'INFO' log_level = 'INFO'
work_dir = './work_dirs/sec_secfpn_80e'
load_from = None load_from = None
resume_from = None resume = False
workflow = [('train', 1)] work_dir = './work_dirs/pp_secfpn_100e'
# Config for MultiViewDfM on Waymo (D5 subsampled, 3 classes): inherits the
# dataset pipeline and model definition from the base configs below and adds
# the optimization schedule and runtime settings.
_base_ = [
    '../_base_/datasets/waymoD5-mv3d-3class.py',
    '../_base_/models/multiview_dfm.py'
]
# optimizer
# AdamW with a 10x smaller LR on the backbone (lr_mult=0.1) and gradient
# clipping at L2 norm 35.
optim_wrapper = dict(
    type='OptimWrapper',
    optimizer=dict(type='AdamW', lr=0.0005, weight_decay=0.0001),
    paramwise_cfg=dict(
        custom_keys={'backbone': dict(lr_mult=0.1, decay_mult=1.0)}),
    clip_grad=dict(max_norm=35., norm_type=2))
# Step LR decay: x0.1 at epochs 16 and 22, over the full 24-epoch run
# (matches train_cfg.max_epochs below).
param_scheduler = [
    dict(
        type='MultiStepLR',
        begin=0,
        end=24,
        by_epoch=True,
        milestones=[16, 22],
        gamma=0.1)
]
# hooks
# Note: no visualization hook here; checkpointing keeps only the latest
# checkpoint (max_keep_ckpts=1).
default_hooks = dict(
    timer=dict(type='IterTimerHook'),
    logger=dict(type='LoggerHook', interval=50),
    param_scheduler=dict(type='ParamSchedulerHook'),
    checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=1),
    sampler_seed=dict(type='DistSamplerSeedHook'),
)
# training schedule for 2x
# Validation only at the end of training (val_interval=24).
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=24, val_interval=24)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# runtime
default_scope = 'mmdet3d'
env_cfg = dict(
    cudnn_benchmark=False,
    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
    dist_cfg=dict(backend='nccl'),
)
log_level = 'INFO'
load_from = None
resume = False
find_unused_parameters = True  # only 1 of 4 FPN outputs is used
# Variant of the multiview-DFM Waymo config that swaps the bbox head for a
# CenterHead (CenterPoint-style, anchor-free). `_delete_=True` discards the
# inherited bbox_head/train_cfg/test_cfg from the base config entirely.
_base_ = ['./multiview-dfm_r101_dcn_2x16_waymoD5-3d-3class.py']
model = dict(
    bbox_head=dict(
        _delete_=True,
        type='CenterHead',
        in_channels=256,
        # One single-class task per category.
        tasks=[
            dict(num_class=1, class_names=['Pedestrian']),
            dict(num_class=1, class_names=['Cyclist']),
            dict(num_class=1, class_names=['Car']),
        ],
        # (output_channels, num_conv) per regression branch — presumably
        # 2D offset, height, 3D dims, and sin/cos rotation; TODO confirm
        # against the CenterHead implementation.
        common_heads=dict(reg=(2, 2), height=(1, 2), dim=(3, 2), rot=(2, 2)),
        share_conv_channel=64,
        bbox_coder=dict(
            type='CenterPointBBoxCoder',
            # Asymmetric x-range [-35, 75] matches the point_cloud_range in
            # train_cfg below.
            post_center_range=[-35.0, -75.0, -2, 75.0, 75.0, 4],
            pc_range=[-35.0, -75.0, -2, 75.0, 75.0, 4],
            max_num=2000,
            score_threshold=0,
            out_size_factor=1,
            voxel_size=(.50, .50),
            code_size=7),
        separate_head=dict(
            type='SeparateHead', init_bias=-2.19, final_kernel=3),
        # Heatmap classification + L1 box regression, both from mmdet.
        loss_cls=dict(type='mmdet.GaussianFocalLoss', reduction='mean'),
        loss_bbox=dict(
            type='mmdet.L1Loss', reduction='mean', loss_weight=0.25),
        norm_bbox=True),
    # model training and testing settings
    train_cfg=dict(
        _delete_=True,
        # 220 x 300 BEV grid = (75 - -35)/0.5 by (75 - -75)/0.5.
        grid_size=[220, 300, 1],
        voxel_size=(0.5, 0.5, 6),
        out_size_factor=1,
        dense_reg=1,
        gaussian_overlap=0.1,
        max_objs=500,
        min_radius=2,
        code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
        point_cloud_range=[-35.0, -75.0, -2, 75.0, 75.0, 4]),
    test_cfg=dict(
        _delete_=True,
        post_center_limit_range=[-35.0, -75.0, -2, 75.0, 75.0, 4],
        max_per_img=4096,
        max_pool_nms=False,
        # Per-class radii (presumably Pedestrian/Cyclist/Car order, matching
        # `tasks` above — TODO confirm).
        min_radius=[0.5, 2, 6],
        score_threshold=0,
        out_size_factor=1,
        voxel_size=(0.5, 0.5),
        # Circle NMS instead of IoU-based NMS.
        nms_type='circle',
        pre_max_size=2000,
        post_max_size=200,
        nms_thr=0.2))
...@@ -53,9 +53,7 @@ test_pipeline = [ ...@@ -53,9 +53,7 @@ test_pipeline = [
] ]
train_dataloader = dict( train_dataloader = dict(
batch_size=2, batch_size=2, num_workers=2, dataset=dict(pipeline=train_pipeline))
num_workers=2,
dataset=dict(dataset=dict(pipeline=train_pipeline)))
test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) test_dataloader = dict(dataset=dict(pipeline=test_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment