"git@developer.sourcefind.cn:OpenDAS/mmdetection3d.git" did not exist on "b16c8dfab492637be330987a6503956df809e1ef"
Unverified commit d865e2ce authored by Wenwei Zhang, committed by GitHub

Rename CosineAnealing to CosineAnnealing (#57)

parent 65264596
@@ -27,7 +27,7 @@ optimizer = dict(
     weight_decay=0.001)
 lr_config = dict(
     _delete_=True,
-    policy='CosineAnealing',
+    policy='CosineAnnealing',
     warmup='linear',
     warmup_iters=1000,
     warmup_ratio=1.0 / 10,
@@ -228,7 +228,7 @@ optimizer = dict(type='AdamW', lr=0.003, betas=(0.95, 0.99), weight_decay=0.01)
 # max_norm=10 is better for SECOND
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
 lr_config = dict(
-    policy='CosineAnealing',
+    policy='CosineAnnealing',
     warmup='linear',
     warmup_iters=1000,
     warmup_ratio=1.0 / 10,
@@ -324,7 +324,7 @@ optimizer_config = dict(  # Config used to build the optimizer hook, refer to ht
         max_norm=10,  # max norm of the gradients
         norm_type=2))  # Type of the used p-norm. Can be 'inf' for infinity norm.
 lr_config = dict(  # Learning rate scheduler config used to register LrUpdater hook
-    policy='step',  # The policy of scheduler, also support CosineAnealing, Cyclic, etc. Refer to details of supported LrUpdater from https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/lr_updater.py#L9.
+    policy='step',  # The policy of scheduler, also support CosineAnnealing, Cyclic, etc. Refer to details of supported LrUpdater from https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/lr_updater.py#L9.
     warmup=None,  # The warmup policy, also support `exp` and `constant`.
     step=[24, 32])  # Steps to decay the learning rate
 checkpoint_config = dict(  # Config to set the checkpoint hook, Refer to https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/checkpoint.py for implementation.
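
The rename matters because MMCV builds the LR scheduler hook from the `policy` string, so the misspelled `CosineAnealing` would not resolve to a registered LrUpdater hook. Below is a minimal, illustrative sketch of an lr_config with the corrected name, mirroring the hunks above; `min_lr_ratio` is an assumed extra field and is not part of this commit.

# Minimal sketch (assumption, not taken from this commit): an lr_config using
# the corrected CosineAnnealing policy, in the style of the hunks above.
lr_config = dict(
    policy='CosineAnnealing',  # resolved by MMCV to CosineAnnealingLrUpdaterHook
    warmup='linear',           # linear LR warmup over the first iterations
    warmup_iters=1000,
    warmup_ratio=1.0 / 10,
    min_lr_ratio=1e-4)         # assumed field: floor of the annealed LR relative to the base LR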