Unverified commit 6c03a971, authored by Tai-Wang, committed by GitHub

Release v1.1.0rc1

parents 9611c2d0 ca42c312
@@ -67,3 +67,8 @@ default_hooks = dict(
     logger=dict(type='LoggerHook', interval=30)
 )
 # yapf:enable
+# Default setting for scaling LR automatically
+# - `enable` means enable scaling LR automatically
+#   or not by default.
+# - `base_batch_size` = (8 GPUs) x (3 samples per GPU).
+auto_scale_lr = dict(enable=False, base_batch_size=24)
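
For reference: when `enable=True` (or the train script is launched with its
auto-scale-LR switch), the runner rescales the optimizer's LR linearly by the
ratio of the actual total batch size to `base_batch_size`. A minimal sketch of
that rule, with a hypothetical helper name `scale_lr`:

def scale_lr(base_lr: float, num_gpus: int, samples_per_gpu: int,
             base_batch_size: int) -> float:
    """Linear scaling rule: LR grows with the effective batch size."""
    actual_batch_size = num_gpus * samples_per_gpu
    return base_lr * actual_batch_size / base_batch_size

# E.g. training on 4 GPUs with 3 samples each against base_batch_size=24
# halves the configured LR: scale_lr(0.008, 4, 3, 24) -> 0.004.
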
@@ -217,3 +217,8 @@ test_dataloader = dict(dataset=dict(pipeline=test_pipeline))
 # may also use your own pre-trained image branch
 load_from = 'https://download.openmmlab.com/mmdetection3d/v0.1.0_models/imvotenet/imvotenet_faster_rcnn_r50_fpn_2x4_sunrgbd-3d-10class/imvotenet_faster_rcnn_r50_fpn_2x4_sunrgbd-3d-10class_20210323_173222-cad62aeb.pth'  # noqa
+# Default setting for scaling LR automatically
+# - `enable` means enable scaling LR automatically
+#   or not by default.
+# - `base_batch_size` = (8 GPUs) x (16 samples per GPU).
+auto_scale_lr = dict(enable=False, base_batch_size=128)
@@ -1,3 +1,6 @@
+_base_ = [
+    '../_base_/schedules/mmdet-schedule-1x.py', '../_base_/default_runtime.py'
+]
 model = dict(
     type='ImVoxelNet',
     data_preprocessor=dict(
@@ -151,7 +155,8 @@ test_evaluator = val_evaluator
 # optimizer
 optim_wrapper = dict(
     type='OptimWrapper',
-    optimizer=dict(type='AdamW', lr=0.0001, weight_decay=0.0001),
+    optimizer=dict(
+        _delete_=True, type='AdamW', lr=0.0001, weight_decay=0.0001),
     paramwise_cfg=dict(
         custom_keys={'backbone': dict(lr_mult=0.1, decay_mult=1.0)}),
     clip_grad=dict(max_norm=35., norm_type=2))
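
The `_delete_=True` key stops the config system from merging these AdamW
settings key-by-key into the optimizer inherited from the base schedule
(`mmdet-schedule-1x.py` configures SGD, so an SGD-only key such as `momentum`
would otherwise survive the merge and break AdamW's constructor). A toy
re-implementation of the merge rule; the real logic lives in `mmengine.Config`,
and the base values below assume the standard 1x SGD schedule:

def merge(base: dict, override: dict) -> dict:
    """Mimic config inheritance: merge dicts unless `_delete_` is set."""
    override = dict(override)  # work on a copy
    if override.pop('_delete_', False):
        return override  # replace the base node wholesale
    merged = dict(base)
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = merge(merged[key], value)
        else:
            merged[key] = value
    return merged

base = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
override = dict(_delete_=True, type='AdamW', lr=0.0001, weight_decay=0.0001)
print(merge(base, override))
# {'type': 'AdamW', 'lr': 0.0001, 'weight_decay': 0.0001}
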
@@ -166,30 +171,7 @@ param_scheduler = [
 ]
 # hooks
-default_hooks = dict(
-    timer=dict(type='IterTimerHook'),
-    logger=dict(type='LoggerHook', interval=50),
-    param_scheduler=dict(type='ParamSchedulerHook'),
-    checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=1),
-    sampler_seed=dict(type='DistSamplerSeedHook'),
-)
-# training schedule for 2x
-train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)
-val_cfg = dict(type='ValLoop')
-test_cfg = dict(type='TestLoop')
+default_hooks = dict(checkpoint=dict(type='CheckpointHook', max_keep_ckpts=1))
 # runtime
-default_scope = 'mmdet3d'
-env_cfg = dict(
-    cudnn_benchmark=False,
-    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
-    dist_cfg=dict(backend='nccl'),
-)
-log_level = 'INFO'
-load_from = None
-resume = False
-dist_params = dict(backend='nccl')
 find_unused_parameters = True  # only 1 of 4 FPN outputs is used
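
`find_unused_parameters = True` is required because, as the inline comment
notes, only one of the four FPN outputs feeds the loss, so some parameters
receive no gradient in a given step and DistributedDataParallel must be told
to tolerate that. A self-contained toy of the situation (the DDP call itself
is sketched in a comment, since it needs an initialized process group):

from torch import nn

class PartiallyUsed(nn.Module):
    """Toy module where one branch never contributes to the loss."""

    def __init__(self):
        super().__init__()
        self.used = nn.Linear(8, 8)
        self.unused = nn.Linear(8, 8)  # stands in for the idle FPN levels

    def forward(self, x):
        return self.used(x)

# Within an initialized process group, the runner wraps the model roughly as:
#   ddp = nn.parallel.DistributedDataParallel(
#       PartiallyUsed().cuda(), find_unused_parameters=True)
# Without the flag, backward() raises because `unused` never gets gradients.
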
...@@ -127,3 +127,9 @@ test_evaluator = val_evaluator ...@@ -127,3 +127,9 @@ test_evaluator = val_evaluator
# Part-A2 uses a different learning rate from what SECOND uses. # Part-A2 uses a different learning rate from what SECOND uses.
optim_wrapper = dict(optimizer=dict(lr=0.001)) optim_wrapper = dict(optimizer=dict(lr=0.001))
find_unused_parameters = True find_unused_parameters = True
# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
@@ -113,7 +113,7 @@ val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
 # optimizer
 optim_wrapper = dict(
-    optimizer=dict(lr=0.01),
+    optimizer=dict(lr=0.001),
     paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.),
     clip_grad=dict(max_norm=35, norm_type=2))
@@ -134,4 +134,4 @@ param_scheduler = [
         gamma=0.1)
 ]
-train_cfg = dict(max_epochs=48)
+train_cfg = dict(max_epochs=48, val_interval=2)
@@ -6,7 +6,8 @@ _base_ = [
 # dataset settings
 dataset_type = 'KittiDataset'
 data_root = 'data/kitti/'
-class_names = ['Car', 'Pedestrian', 'Cyclist']
+class_names = ['Pedestrian', 'Cyclist', 'Car']
+metainfo = dict(CLASSES=class_names)
 point_cloud_range = [0, -40, -3, 70.4, 40, 1]
 input_modality = dict(use_lidar=True, use_camera=False)
@@ -42,8 +43,9 @@ train_pipeline = [
     dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),
     dict(type='PointSample', num_points=16384, sample_range=40.0),
     dict(type='PointShuffle'),
-    dict(type='DefaultFormatBundle3D', class_names=class_names),
-    dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])
+    dict(
+        type='Pack3DDetInputs',
+        keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])
 ]
 test_pipeline = [
     dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4),
@@ -61,36 +63,67 @@ test_pipeline = [
             dict(type='RandomFlip3D'),
             dict(
                 type='PointsRangeFilter', point_cloud_range=point_cloud_range),
-            dict(type='PointSample', num_points=16384, sample_range=40.0),
-            dict(
-                type='DefaultFormatBundle3D',
-                class_names=class_names,
-                with_label=False),
-            dict(type='Collect3D', keys=['points'])
-        ])
+            dict(type='PointSample', num_points=16384, sample_range=40.0)
+        ]),
+    dict(type='Pack3DDetInputs', keys=['points'])
 ]
-data = dict(
-    samples_per_gpu=2,
-    workers_per_gpu=2,
-    train=dict(
-        type='RepeatDataset',
-        times=2,
-        dataset=dict(pipeline=train_pipeline, classes=class_names)),
-    val=dict(pipeline=test_pipeline, classes=class_names),
-    test=dict(pipeline=test_pipeline, classes=class_names))
+train_dataloader = dict(
+    batch_size=2,
+    num_workers=2,
+    dataset=dict(
+        type='RepeatDataset',
+        times=2,
+        dataset=dict(pipeline=train_pipeline, metainfo=metainfo)))
+test_dataloader = dict(dataset=dict(pipeline=test_pipeline, metainfo=metainfo))
+val_dataloader = dict(dataset=dict(pipeline=test_pipeline, metainfo=metainfo))
 # optimizer
 lr = 0.001  # max learning rate
-optimizer = dict(lr=lr, betas=(0.95, 0.85))
-# runtime settings
-runner = dict(type='EpochBasedRunner', max_epochs=80)
-evaluation = dict(interval=2)
-# yapf:disable
-log_config = dict(
-    interval=30,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHook')
-    ])
-# yapf:enable
+optim_wrapper = dict(optimizer=dict(lr=lr, betas=(0.95, 0.85)))
+train_cfg = dict(by_epoch=True, max_epochs=80, val_interval=2)
+# Default setting for scaling LR automatically
+# - `enable` means enable scaling LR automatically
+#   or not by default.
+# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
+auto_scale_lr = dict(enable=False, base_batch_size=16)
+param_scheduler = [
+    # learning rate scheduler
+    # During the first 35 epochs, learning rate increases from 0 to lr * 10
+    # during the next 45 epochs, learning rate decreases from lr * 10 to
+    # lr * 1e-4
+    dict(
+        type='CosineAnnealingLR',
+        T_max=35,
+        eta_min=lr * 10,
+        begin=0,
+        end=35,
+        by_epoch=True,
+        convert_to_iter_based=True),
+    dict(
+        type='CosineAnnealingLR',
+        T_max=45,
+        eta_min=lr * 1e-4,
+        begin=35,
+        end=80,
+        by_epoch=True,
+        convert_to_iter_based=True),
+    # momentum scheduler
+    # During the first 35 epochs, momentum increases from 0 to 0.85 / 0.95
+    # during the next 45 epochs, momentum increases from 0.85 / 0.95 to 1
+    dict(
+        type='CosineAnnealingMomentum',
+        T_max=35,
+        eta_min=0.85 / 0.95,
+        begin=0,
+        end=35,
+        by_epoch=True,
+        convert_to_iter_based=True),
+    dict(
+        type='CosineAnnealingMomentum',
+        T_max=45,
+        eta_min=1,
+        begin=35,
+        end=80,
+        by_epoch=True,
+        convert_to_iter_based=True)
+]
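
One nit on the scheduler comments: with `eta_min=lr * 10` above the starting
value, the first cosine phase rises from `lr` (not from 0) to `lr * 10`.
Plugging the phase boundaries into the textbook cosine-annealing formula
confirms the endpoints; this is only a sketch, and MMEngine's internal
bookkeeping differs in detail:

import math

lr = 0.001  # base LR from the config above

def cosine_anneal(start: float, eta_min: float, t: int, t_max: int) -> float:
    """eta_min + (start - eta_min) * (1 + cos(pi * t / t_max)) / 2."""
    return eta_min + (start - eta_min) * (1 + math.cos(math.pi * t / t_max)) / 2

print(cosine_anneal(lr, lr * 10, t=0, t_max=35))          # 0.001  (= lr)
print(cosine_anneal(lr, lr * 10, t=35, t_max=35))         # 0.01   (= lr * 10)
print(cosine_anneal(lr * 10, lr * 1e-4, t=45, t_max=45))  # 1e-07  (= lr * 1e-4)
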
@@ -3,3 +3,8 @@ _base_ = [
     '../_base_/datasets/lyft-3d-range100.py',
     '../_base_/schedules/schedule-2x.py', '../_base_/default_runtime.py'
 ]
+# Default setting for scaling LR automatically
+# - `enable` means enable scaling LR automatically
+#   or not by default.
+# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
+auto_scale_lr = dict(enable=False, base_batch_size=16)
@@ -3,3 +3,8 @@ _base_ = [
     '../_base_/datasets/lyft-3d.py', '../_base_/schedules/schedule-2x.py',
     '../_base_/default_runtime.py'
 ]
+# Default setting for scaling LR automatically
+# - `enable` means enable scaling LR automatically
+#   or not by default.
+# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
+auto_scale_lr = dict(enable=False, base_batch_size=16)
@@ -6,4 +6,9 @@ _base_ = [
 ]
 # data settings
-data = dict(train=dict(dataset=dict(load_interval=1)))
+train_dataloader = dict(dataset=dict(dataset=dict(load_interval=1)))
+# Default setting for scaling LR automatically
+# - `enable` means enable scaling LR automatically
+#   or not by default.
+# - `base_batch_size` = (16 GPUs) x (2 samples per GPU).
+auto_scale_lr = dict(enable=False, base_batch_size=32)
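
The doubled `dataset=dict(dataset=...)` is deliberate: the training set sits
inside a dataset wrapper (e.g. CBGS-style resampling), so the outer key
addresses the wrapper and the inner key the wrapped dataset. Schematically
(type names here are illustrative, not taken from this diff):

train_dataloader = dict(
    dataset=dict(            # the wrapper, e.g. type='CBGSDataset'
        dataset=dict(        # the actual annotation-file dataset
            load_interval=1  # use every frame instead of a subsample
        )))
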
@@ -6,7 +6,7 @@ _base_ = [
 ]
 # data settings
-data = dict(train=dict(dataset=dict(load_interval=1)))
+train_dataloader = dict(dataset=dict(dataset=dict(load_interval=1)))
 # model settings
 model = dict(
@@ -35,3 +35,8 @@ model = dict(
             code_weight=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
             pos_weight=-1,
             debug=False)))
+# Default setting for scaling LR automatically
+# - `enable` means enable scaling LR automatically
+#   or not by default.
+# - `base_batch_size` = (16 GPUs) x (2 samples per GPU).
+auto_scale_lr = dict(enable=False, base_batch_size=32)
@@ -32,3 +32,8 @@ model = dict(
             code_weight=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
             pos_weight=-1,
             debug=False)))
+# Default setting for scaling LR automatically
+# - `enable` means enable scaling LR automatically
+#   or not by default.
+# - `base_batch_size` = (16 GPUs) x (2 samples per GPU).
+auto_scale_lr = dict(enable=False, base_batch_size=32)
@@ -40,3 +40,8 @@ model = dict(
             ],
             rotations=[0, 1.57],
             reshape_out=True)))
+# Default setting for scaling LR automatically
+# - `enable` means enable scaling LR automatically
+#   or not by default.
+# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
+auto_scale_lr = dict(enable=False, base_batch_size=16)
@@ -41,3 +41,8 @@ model = dict(
             ],
             rotations=[0, 1.57],
             reshape_out=True)))
+# Default setting for scaling LR automatically
+# - `enable` means enable scaling LR automatically
+#   or not by default.
+# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
+auto_scale_lr = dict(enable=False, base_batch_size=16)
@@ -22,3 +22,8 @@ model = dict(
         norm_eval=False,
         style='pytorch'),
     pts_neck=dict(in_channels=[64, 160, 384]))
+# Default setting for scaling LR automatically
+# - `enable` means enable scaling LR automatically
+#   or not by default.
+# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
+auto_scale_lr = dict(enable=False, base_batch_size=16)
@@ -22,3 +22,8 @@ model = dict(
         norm_eval=False,
         style='pytorch'),
     pts_neck=dict(in_channels=[64, 160, 384]))
+# Default setting for scaling LR automatically
+# - `enable` means enable scaling LR automatically
+#   or not by default.
+# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
+auto_scale_lr = dict(enable=False, base_batch_size=16)
@@ -140,3 +140,8 @@ test_dataloader = dict(
         test_mode=True,
         metainfo=metainfo,
         box_type_3d='LiDAR'))
+# Default setting for scaling LR automatically
+# - `enable` means enable scaling LR automatically
+#   or not by default.
+# - `base_batch_size` = (16 GPUs) x (2 samples per GPU).
+auto_scale_lr = dict(enable=False, base_batch_size=32)
@@ -47,8 +47,10 @@ train_dataloader = dict(
 test_dataloader = dict(dataset=dict(pipeline=test_pipeline))
 val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
-# training schedule for 1x
-train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)
+# training schedule for 6x
+max_epochs = 72
+train_cfg = dict(
+    type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=5)
 val_cfg = dict(type='ValLoop')
 test_cfg = dict(type='TestLoop')
@@ -57,9 +59,9 @@ param_scheduler = [
     dict(
         type='MultiStepLR',
         begin=0,
-        end=12,
+        end=max_epochs,
         by_epoch=True,
-        milestones=[8, 11],
+        milestones=[50],
         gamma=0.1)
 ]
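
With the 6x schedule the step decay now fires once, at epoch 50. Standard
`MultiStepLR` semantics as a sketch (not MMEngine's implementation):

def multistep_lr(base_lr: float, epoch: int,
                 milestones=(50, ), gamma: float = 0.1) -> float:
    """LR is multiplied by `gamma` once per milestone already passed."""
    passed = sum(epoch >= m for m in milestones)
    return base_lr * gamma**passed

print(multistep_lr(2.5e-4, 10))  # 0.00025 (before the milestone)
print(multistep_lr(2.5e-4, 60))  # 2.5e-05 (decayed once after epoch 50)
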
@@ -68,3 +70,5 @@ optim_wrapper = dict(
     type='OptimWrapper',
     optimizer=dict(type='Adam', lr=2.5e-4),
     clip_grad=None)
+
+find_unused_parameters = True
@@ -220,3 +220,8 @@ model = dict(
             code_weight=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
             pos_weight=-1,
             debug=False)))
+# Default setting for scaling LR automatically
+# - `enable` means enable scaling LR automatically
+#   or not by default.
+# - `base_batch_size` = (16 GPUs) x (2 samples per GPU).
+auto_scale_lr = dict(enable=False, base_batch_size=32)
@@ -20,3 +20,8 @@ model = dict(
             [0.404671, 1.071108, 1.688889], [0.76584, 1.398258, 0.472728]
         ]),
     ))
+# Default setting for scaling LR automatically
+# - `enable` means enable scaling LR automatically
+#   or not by default.
+# - `base_batch_size` = (8 GPUs) x (16 samples per GPU).
+auto_scale_lr = dict(enable=False, base_batch_size=128)
@@ -32,3 +32,8 @@ model = dict(
                 [0.47535285, 0.49249494, 0.5802117]])))
 default_hooks = dict(logger=dict(type='LoggerHook', interval=30))
+# Default setting for scaling LR automatically
+# - `enable` means enable scaling LR automatically
+#   or not by default.
+# - `base_batch_size` = (8 GPUs) x (8 samples per GPU).
+auto_scale_lr = dict(enable=False, base_batch_size=64)