Commit 85529f35 authored by unknown's avatar unknown
Browse files

Add OpenMMLab test cases (添加openmmlab测试用例)

parent b21b0c01
# Optimizer: SGD with Nesterov momentum and weight decay.
optimizer = {
    'type': 'SGD',
    'lr': 0.8,
    'momentum': 0.9,
    'weight_decay': 0.0001,
    'nesterov': True,
}
optimizer_config = {'grad_clip': None}
# Learning policy: linear warmup for 2500 iterations, then step decay
# at epochs 30, 60 and 90.
lr_config = {
    'policy': 'step',
    'warmup': 'linear',
    'warmup_iters': 2500,
    'warmup_ratio': 0.25,
    'step': [30, 60, 90],
}
runner = {'type': 'EpochBasedRunner', 'max_epochs': 100}
# Optimizer.
# ClassyVision uses lr=0.003 at batch size 4096; linearly rescaled here for
# an effective batch of 2048 (32 imgs/gpu * 64 gpus):
# 0.003 / 4096 * (32 * 64) = 0.0015.
optimizer = {'type': 'AdamW', 'lr': 0.0015, 'weight_decay': 0.3}
optimizer_config = {'grad_clip': {'max_norm': 1.0}}
# ViT pretraining specific: exempt the class token and position embeddings
# from weight decay.
paramwise_cfg = {
    'custom_keys': {
        '.backbone.cls_token': {'decay_mult': 0.0},
        '.backbone.pos_embed': {'decay_mult': 0.0},
    }
}
# Learning policy: cosine annealing to 0 after a long linear warmup.
lr_config = {
    'policy': 'CosineAnnealing',
    'min_lr': 0,
    'warmup': 'linear',
    'warmup_iters': 10000,
    'warmup_ratio': 1e-4,
}
runner = {'type': 'EpochBasedRunner', 'max_epochs': 300}
# Optimizer: SGD with Nesterov momentum and weight decay.
optimizer = {
    'type': 'SGD',
    'lr': 0.8,
    'momentum': 0.9,
    'weight_decay': 0.0001,
    'nesterov': True,
}
optimizer_config = {'grad_clip': None}
# Learning policy: linear warmup for 2500 iterations, then cosine
# annealing down to 0.
lr_config = {
    'policy': 'CosineAnnealing',
    'min_lr': 0,
    'warmup': 'linear',
    'warmup_iters': 2500,
    'warmup_ratio': 0.25,
}
runner = {'type': 'EpochBasedRunner', 'max_epochs': 100}
# Optimizer: plain SGD with momentum.
optimizer = {'type': 'SGD', 'lr': 0.1, 'momentum': 0.9, 'weight_decay': 0.0001}
optimizer_config = {'grad_clip': None}
# Learning policy: step decay at epochs 30, 60 and 90; train 100 epochs.
lr_config = {'policy': 'step', 'step': [30, 60, 90]}
runner = {'type': 'EpochBasedRunner', 'max_epochs': 100}
# Optimizer: plain SGD with momentum.
optimizer = {'type': 'SGD', 'lr': 0.1, 'momentum': 0.9, 'weight_decay': 0.0001}
optimizer_config = {'grad_clip': None}
# Learning policy: step decay at epochs 40, 80 and 120; train 140 epochs.
lr_config = {'policy': 'step', 'step': [40, 80, 120]}
runner = {'type': 'EpochBasedRunner', 'max_epochs': 140}
# Optimizer: plain SGD with momentum.
optimizer = {'type': 'SGD', 'lr': 0.1, 'momentum': 0.9, 'weight_decay': 0.0001}
optimizer_config = {'grad_clip': None}
# Learning policy: cosine annealing down to 0 over 100 epochs.
lr_config = {'policy': 'CosineAnnealing', 'min_lr': 0}
runner = {'type': 'EpochBasedRunner', 'max_epochs': 100}
# Optimizer: SGD with momentum and a small weight decay.
optimizer = {'type': 'SGD', 'lr': 0.045, 'momentum': 0.9, 'weight_decay': 0.00004}
optimizer_config = {'grad_clip': None}
# Learning policy: multiply the lr by gamma=0.98 every epoch (step=1).
lr_config = {'policy': 'step', 'gamma': 0.98, 'step': 1}
runner = {'type': 'EpochBasedRunner', 'max_epochs': 300}
# Optimizer: AdamW with a strong weight decay.
optimizer = {'type': 'AdamW', 'lr': 0.003, 'weight_decay': 0.3}
optimizer_config = {'grad_clip': {'max_norm': 1.0}}
# ViT pretraining specific: exempt the class token and position embeddings
# from weight decay.
paramwise_cfg = {
    'custom_keys': {
        '.backbone.cls_token': {'decay_mult': 0.0},
        '.backbone.pos_embed': {'decay_mult': 0.0},
    }
}
# Learning policy: cosine annealing to 0 after a long linear warmup.
lr_config = {
    'policy': 'CosineAnnealing',
    'min_lr': 0,
    'warmup': 'linear',
    'warmup_iters': 10000,
    'warmup_ratio': 1e-4,
}
runner = {'type': 'EpochBasedRunner', 'max_epochs': 300}
# Mixed Precision Training
## Introduction
<!-- [OTHERS] -->
```latex
@article{micikevicius2017mixed,
title={Mixed precision training},
author={Micikevicius, Paulius and Narang, Sharan and Alben, Jonah and Diamos, Gregory and Elsen, Erich and Garcia, David and Ginsburg, Boris and Houston, Michael and Kuchaiev, Oleksii and Venkatesh, Ganesh and others},
journal={arXiv preprint arXiv:1710.03740},
year={2017}
}
```
## Results and models
| Model | Params(M) | Flops(G) | Mem (GB) | Top-1 (%) | Top-5 (%) | Config | Download |
|:---------------------:|:---------:|:--------:|:---------:|:---------:|:---------:| :---------:|:--------:|
| ResNet-50 | 25.56 | 4.12 | 1.9 |76.32 | 93.04 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/fp16/resnet50_b32x8_fp16_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/fp16/resnet50_batch256_fp16_imagenet_20210320-b3964210.pth) &#124; [log](https://download.openmmlab.com/mmclassification/v0/fp16/resnet50_batch256_fp16_imagenet_20210320-b3964210.log.json) |
# Model-index metafile for the FP16 (mixed precision) configs.
# NOTE(review): the pasted source lost all YAML indentation, which makes it
# structurally invalid; the standard model-index nesting is restored here.
Collections:
  - Name: FP16
    Metadata:
      Training Data: ImageNet
      Training Resources: 8x V100 GPUs
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
        - Mixed Precision Training
    Paper: https://arxiv.org/abs/1710.03740
    README: configs/fp16/README.md

Models:
  - Config: configs/fp16/resnet50_b32x8_fp16_dynamic_imagenet.py
    In Collection: FP16
    Metadata:
      FLOPs: 4120000000
      Parameters: 25560000
      Epochs: 100
      Batch Size: 256
      Architecture:
        - ResNet
    Name: resnet50_b32x8_fp16_dynamic_imagenet
    Results:
      - Dataset: ImageNet
        Metrics:
          Top 1 Accuracy: 76.32
          Top 5 Accuracy: 93.04
        Task: Image Classification
    Weights: https://download.openmmlab.com/mmclassification/v0/fp16/resnet50_batch256_fp16_imagenet_20210320-b3964210.pth
_base_ = [
'../_base_/models/resnet152.py', '../speed_test/datasets/imagenet_bs32.py',
'../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
]
# fp16 settings
fp16 = dict(loss_scale=512.)
\ No newline at end of file
_base_ = [
'../_base_/models/resnet18.py', '../speed_test/datasets/imagenet_bs32.py',
'../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
]
# fp16 settings
fp16 = dict(loss_scale=512.)
\ No newline at end of file
_base_ = [
'../_base_/models/resnet34.py', '../speed_test/datasets/imagenet_bs32.py',
'../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
]
# fp16 settings
fp16 = dict(loss_scale=512.)
\ No newline at end of file
_base_ = ['../resnet/resnet50_b32x8_imagenet.py']
# fp16 settings
fp16 = dict(loss_scale='dynamic')
#_base_ = ['../resnet/resnet50_b32x8_imagenet.py']
_base_ = [
'../_base_/models/resnet50.py', '../speed_test/datasets/imagenet_bs32.py',
'../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
]
# fp16 settings
fp16 = dict(loss_scale=512.)
_base_ = [
'../_base_/models/resnext50_32x4d.py', '../speed_test/datasets/imagenet_bs32.py',
'../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
]
# fp16 settings
fp16 = dict(loss_scale=512.)
\ No newline at end of file
_base_ = [
'../_base_/models/seresnet50.py', '../speed_test/datasets/imagenet_bs32.py',
'../_base_/schedules/imagenet_bs256_140e.py',
'../_base_/default_runtime.py'
]
# fp16 settings
fp16 = dict(loss_scale=512.)
\ No newline at end of file
_base_ = [
'../_base_/models/shufflenet_v1_1x.py', '../speed_test/datasets/imagenet_bs64.py',
'../_base_/schedules/imagenet_bs1024_linearlr_bn_nowd.py',
'../_base_/default_runtime.py'
]
# fp16 settings
fp16 = dict(loss_scale=512.)
\ No newline at end of file
_base_ = [
'../_base_/models/shufflenet_v2_1x.py', '../speed_test/datasets/imagenet_bs64.py',
'../_base_/schedules/imagenet_bs1024_linearlr_bn_nowd.py',
'../_base_/default_runtime.py'
]
# fp16 settings
fp16 = dict(loss_scale=512.)
\ No newline at end of file
_base_ = [
'../_base_/models/vgg11.py', '../speed_test/datasets/imagenet_bs32.py',
'../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
]
# fp16 settings
fp16 = dict(loss_scale=512.)
\ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment