Commit 5517c8eb authored by liyinhao's avatar liyinhao Committed by zhangwenwei
Browse files

Openlidar benchmark config

parent e06a6ea4
......@@ -252,9 +252,6 @@ data = dict(
samples_per_gpu=4,
workers_per_gpu=4,
train=dict(
type='RepeatDataset',
times=2,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file=data_root + 'kitti_infos_train.pkl',
......@@ -263,7 +260,7 @@ data = dict(
pipeline=train_pipeline,
modality=input_modality,
classes=class_names,
test_mode=False)),
test_mode=False),
val=dict(
type=dataset_type,
data_root=data_root,
......@@ -309,7 +306,7 @@ log_config = dict(
])
# yapf:enable
# runtime settings
total_epochs = 40
total_epochs = 80
dist_params = dict(backend='nccl', port=29506)
log_level = 'INFO'
find_unused_parameters = True
......
......@@ -119,7 +119,7 @@ db_sampler = dict(
)),
classes=class_names,
sample_groups=dict(
Car=20,
Car=15,
Pedestrian=15,
Cyclist=15,
))
......@@ -197,26 +197,24 @@ data = dict(
classes=class_names,
test_mode=True))
# optimizer
lr = 0.001 # max learning rate
lr = 0.0003 # max learning rate
optimizer = dict(
type='AdamW',
lr=lr,
betas=(0.95, 0.99), # the momentum is change during training
weight_decay=0.01)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
optimizer_config = dict(grad_clip=dict(max_norm=10, norm_type=2))
# learning policy
lr_config = dict(
policy='cyclic',
target_ratio=(10, 1e-4),
cyclic_times=1,
step_ratio_up=0.4,
)
step_ratio_up=0.4)
momentum_config = dict(
policy='cyclic',
target_ratio=(0.85 / 0.95, 1),
cyclic_times=1,
step_ratio_up=0.4,
)
step_ratio_up=0.4)
checkpoint_config = dict(interval=1)
evaluation = dict(interval=2)
# yapf:disable
......@@ -228,7 +226,7 @@ log_config = dict(
])
# yapf:enable
# runtime settings
total_epochs = 160
total_epochs = 80
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/pp_secfpn_80e'
......
......@@ -201,21 +201,19 @@ data = dict(
classes=class_names,
test_mode=True))
# optimizer
lr = 0.0018 # max learning rate
lr = 0.0003 # max learning rate
optimizer = dict(type='AdamW', lr=lr, betas=(0.95, 0.99), weight_decay=0.01)
optimizer_config = dict(grad_clip=dict(max_norm=10, norm_type=2))
lr_config = dict(
policy='cyclic',
target_ratio=(10, 1e-4),
cyclic_times=1,
step_ratio_up=0.4,
)
step_ratio_up=0.4)
momentum_config = dict(
policy='cyclic',
target_ratio=(0.85 / 0.95, 1),
cyclic_times=1,
step_ratio_up=0.4,
)
step_ratio_up=0.4)
checkpoint_config = dict(interval=1)
evaluation = dict(interval=2)
# yapf:disable
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment