Commit dff2c686 authored by renzhc

first commit

parent 8f9dd0ed
_base_ = [
'../../_base_/models/swin_transformer/base_224.py',
'../../_base_/datasets/imagenet_bs256_swin_192.py',
'../../_base_/default_runtime.py'
]
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='RandomResizedCrop',
scale=224,
backend='pillow',
interpolation='bicubic'),
dict(type='RandomFlip', prob=0.5, direction='horizontal'),
dict(
type='RandAugment',
policies='timm_increasing',
num_policies=2,
total_level=10,
magnitude_level=9,
magnitude_std=0.5,
hparams=dict(pad_val=[104, 116, 124], interpolation='bicubic')),
dict(
type='RandomErasing',
erase_prob=0.25,
mode='rand',
min_area_ratio=0.02,
max_area_ratio=0.3333333333333333,
fill_color=[103.53, 116.28, 123.675],
fill_std=[57.375, 57.12, 58.395]),
dict(type='PackInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='ResizeEdge',
scale=256,
edge='short',
backend='pillow',
interpolation='bicubic'),
dict(type='CenterCrop', crop_size=224),
dict(type='PackInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# model settings
model = dict(
backbone=dict(
img_size=224,
drop_path_rate=0.1,
stage_cfgs=dict(block_cfgs=dict(window_size=7)),
init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')))
# optimizer settings
optim_wrapper = dict(
type='AmpOptimWrapper',
optimizer=dict(type='AdamW', lr=5e-3, weight_decay=0.05),
clip_grad=dict(max_norm=5.0),
constructor='LearningRateDecayOptimWrapperConstructor',
paramwise_cfg=dict(
layer_decay_rate=0.9,
custom_keys={
'.norm': dict(decay_mult=0.0),
'.bias': dict(decay_mult=0.0),
'.absolute_pos_embed': dict(decay_mult=0.0),
'.relative_position_bias_table': dict(decay_mult=0.0)
}))
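# With LearningRateDecayOptimWrapperConstructor, the LR of each parameter
# group is scaled by layer_decay_rate ** (num_layers - layer_depth), so the
# last backbone layers train near the base 5e-3 while early layers are damped.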
# learning rate scheduler
param_scheduler = [
dict(
type='LinearLR',
start_factor=2.5e-7 / 1.25e-3,
by_epoch=True,
begin=0,
end=20,
convert_to_iter_based=True),
dict(
type='CosineAnnealingLR',
T_max=80,
eta_min=2.5e-7 * 2048 / 512,
by_epoch=True,
begin=20,
end=100,
convert_to_iter_based=True)
]
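# Net effect: LR warms up linearly from 5e-3 * (2.5e-7 / 1.25e-3) = 1e-6 to
# 5e-3 over epochs 0-20, then cosine-decays to eta_min = 1e-6 by epoch 100.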
# runtime settings
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=100)
val_cfg = dict()
test_cfg = dict()
default_hooks = dict(
# save checkpoint per epoch.
checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3),
logger=dict(type='LoggerHook', interval=100))
randomness = dict(seed=0)
_base_ = [
'../../_base_/models/swin_transformer/base_224.py',
'../../_base_/datasets/imagenet_bs256_swin_192.py',
'../../_base_/default_runtime.py'
]
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='RandomResizedCrop',
scale=224,
backend='pillow',
interpolation='bicubic'),
dict(type='RandomFlip', prob=0.5, direction='horizontal'),
dict(
type='RandAugment',
policies='timm_increasing',
num_policies=2,
total_level=10,
magnitude_level=9,
magnitude_std=0.5,
hparams=dict(pad_val=[104, 116, 124], interpolation='bicubic')),
dict(
type='RandomErasing',
erase_prob=0.25,
mode='rand',
min_area_ratio=0.02,
max_area_ratio=0.3333333333333333,
fill_color=[103.53, 116.28, 123.675],
fill_std=[57.375, 57.12, 58.395]),
dict(type='PackInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='ResizeEdge',
scale=256,
edge='short',
backend='pillow',
interpolation='bicubic'),
dict(type='CenterCrop', crop_size=224),
dict(type='PackInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# model settings
model = dict(
backbone=dict(
arch='large',
img_size=224,
drop_path_rate=0.2,
stage_cfgs=dict(block_cfgs=dict(window_size=14)),
pad_small_map=True,
init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
head=dict(in_channels=1536))
# optimizer settings
optim_wrapper = dict(
type='AmpOptimWrapper',
optimizer=dict(type='AdamW', lr=5e-3, weight_decay=0.05),
clip_grad=dict(max_norm=5.0),
constructor='LearningRateDecayOptimWrapperConstructor',
paramwise_cfg=dict(
layer_decay_rate=0.7,
custom_keys={
'.norm': dict(decay_mult=0.0),
'.bias': dict(decay_mult=0.0),
'.absolute_pos_embed': dict(decay_mult=0.0),
'.relative_position_bias_table': dict(decay_mult=0.0)
}))
# learning rate scheduler
param_scheduler = [
dict(
type='LinearLR',
start_factor=2.5e-7 / 1.25e-3,
by_epoch=True,
begin=0,
end=20,
convert_to_iter_based=True),
dict(
type='CosineAnnealingLR',
T_max=80,
eta_min=1e-6,
by_epoch=True,
begin=20,
end=100,
convert_to_iter_based=True)
]
# runtime settings
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=100)
val_cfg = dict()
test_cfg = dict()
default_hooks = dict(
# save checkpoint per epoch.
checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3),
logger=dict(type='LoggerHook', interval=100))
randomness = dict(seed=0)
Collections:
- Name: SimMIM
Metadata:
Training Data: ImageNet-1k
Training Techniques:
- AdamW
Training Resources: 16x A100 GPUs
Architecture:
- Swin
Paper:
Title: 'SimMIM: A Simple Framework for Masked Image Modeling'
URL: https://arxiv.org/abs/2111.09886
README: configs/simmim/README.md
Models:
- Name: simmim_swin-base-w6_8xb256-amp-coslr-100e_in1k-192px
Metadata:
Epochs: 100
Batch Size: 2048
FLOPs: 18832161792
Parameters: 89874104
Training Data: ImageNet-1k
In Collection: SimMIM
Results: null
Weights: https://download.openmmlab.com/mmselfsup/1.x/simmim/simmim_swin-base_8xb256-amp-coslr-100e_in1k-192/simmim_swin-base_8xb256-amp-coslr-100e_in1k-192_20220829-0e15782d.pth
Config: configs/simmim/simmim_swin-base-w6_8xb256-amp-coslr-100e_in1k-192px.py
Downstream:
- swin-base-w6_simmim-100e-pre_8xb256-coslr-100e_in1k-192px
- swin-base-w7_simmim-100e-pre_8xb256-coslr-100e_in1k
- Name: simmim_swin-base-w6_16xb128-amp-coslr-800e_in1k-192px
Metadata:
Epochs: 800
Batch Size: 2048
FLOPs: 18832161792
Parameters: 89874104
Training Data: ImageNet-1k
In Collection: SimMIM
Results: null
Weights: https://download.openmmlab.com/mmselfsup/1.x/simmim/simmim_swin-base_16xb128-amp-coslr-800e_in1k-192/simmim_swin-base_16xb128-amp-coslr-800e_in1k-192_20220916-a0e931ac.pth
Config: configs/simmim/simmim_swin-base-w6_16xb128-amp-coslr-800e_in1k-192px.py
Downstream:
- swin-base-w7_simmim-800e-pre_8xb256-coslr-100e_in1k
- Name: simmim_swin-large-w12_16xb128-amp-coslr-800e_in1k-192px
Metadata:
Epochs: 800
Batch Size: 2048
FLOPs: 55849130496
Parameters: 199920372
Training Data: ImageNet-1k
In Collection: SimMIM
Results: null
Weights: https://download.openmmlab.com/mmselfsup/1.x/simmim/simmim_swin-large_16xb128-amp-coslr-800e_in1k-192/simmim_swin-large_16xb128-amp-coslr-800e_in1k-192_20220916-4ad216d3.pth
Config: configs/simmim/simmim_swin-large-w12_16xb128-amp-coslr-800e_in1k-192px.py
Downstream:
- swin-large-w14_simmim-800e-pre_8xb256-coslr-100e_in1k
- Name: swin-base-w6_simmim-100e-pre_8xb256-coslr-100e_in1k-192px
Metadata:
Epochs: 100
Batch Size: 2048
FLOPs: 11303976960
Parameters: 87750176
Training Data: ImageNet-1k
In Collection: SimMIM
Results:
- Task: Image Classification
Dataset: ImageNet-1k
Metrics:
Top 1 Accuracy: 82.7
Weights: https://download.openmmlab.com/mmselfsup/1.x/simmim/simmim_swin-base_8xb256-amp-coslr-100e_in1k-192/swin-base_ft-8xb256-coslr-100e_in1k/swin-base_ft-8xb256-coslr-100e_in1k_20220829-9cf23aa1.pth
Config: configs/simmim/benchmarks/swin-base-w6_8xb256-coslr-100e_in1k-192px.py
- Name: swin-base-w7_simmim-100e-pre_8xb256-coslr-100e_in1k
Metadata:
Epochs: 100
Batch Size: 2048
FLOPs: 15466852352
Parameters: 87768224
Training Data: ImageNet-1k
In Collection: SimMIM
Results:
- Task: Image Classification
Dataset: ImageNet-1k
Metrics:
Top 1 Accuracy: 83.5
Weights: null
Config: configs/simmim/benchmarks/swin-base-w7_8xb256-coslr-100e_in1k.py
- Name: swin-base-w7_simmim-800e-pre_8xb256-coslr-100e_in1k
Metadata:
Epochs: 100
Batch Size: 2048
FLOPs: 15466852352
Parameters: 87768224
Training Data: ImageNet-1k
In Collection: SimMIM
Results:
- Task: Image Classification
Dataset: ImageNet-1k
Metrics:
Top 1 Accuracy: 83.8
Weights: https://download.openmmlab.com/mmselfsup/1.x/simmim/simmim_swin-base_16xb128-amp-coslr-800e_in1k-192/swin-base_ft-8xb256-coslr-100e_in1k-224/swin-base_ft-8xb256-coslr-100e_in1k-224_20221208-155cc6e6.pth
Config: configs/simmim/benchmarks/swin-base-w7_8xb256-coslr-100e_in1k.py
- Name: swin-large-w14_simmim-800e-pre_8xb256-coslr-100e_in1k
Metadata:
Epochs: 100
Batch Size: 2048
FLOPs: 38853083136
Parameters: 196848316
Training Data: ImageNet-1k
In Collection: SimMIM
Results:
- Task: Image Classification
Dataset: ImageNet-1k
Metrics:
Top 1 Accuracy: 84.8
Weights: https://download.openmmlab.com/mmselfsup/1.x/simmim/simmim_swin-large_16xb128-amp-coslr-800e_in1k-192/swin-large_ft-8xb256-coslr-ws14-100e_in1k-224/swin-large_ft-8xb256-coslr-ws14-100e_in1k-224_20220916-d4865790.pth
Config: configs/simmim/benchmarks/swin-large-w14_8xb256-coslr-100e_in1k.py
_base_ = 'simmim_swin-base-w6_8xb256-amp-coslr-100e_in1k-192px.py'
# dataset 16 GPUs x 128
train_dataloader = dict(batch_size=128)
_base_ = [
'../_base_/datasets/imagenet_bs256_simmim_192.py',
'../_base_/default_runtime.py',
]
# model settings
model = dict(
type='SimMIM',
backbone=dict(
type='SimMIMSwinTransformer',
arch='base',
img_size=192,
stage_cfgs=dict(block_cfgs=dict(window_size=6))),
neck=dict(
type='SimMIMLinearDecoder', in_channels=128 * 2**3, encoder_stride=32),
head=dict(
type='SimMIMHead',
patch_size=4,
loss=dict(type='PixelReconstructionLoss', criterion='L1', channel=3)))
# optimizer wrapper
optim_wrapper = dict(
type='AmpOptimWrapper',
optimizer=dict(
type='AdamW',
lr=1e-4 * 2048 / 512,
betas=(0.9, 0.999),
weight_decay=0.05),
clip_grad=dict(max_norm=5.0),
paramwise_cfg=dict(
custom_keys={
'norm': dict(decay_mult=0.0),
'bias': dict(decay_mult=0.0),
'absolute_pos_embed': dict(decay_mult=0.),
'relative_position_bias_table': dict(decay_mult=0.)
}))
# learning rate scheduler
param_scheduler = [
dict(
type='LinearLR',
start_factor=5e-7 / 1e-4,
by_epoch=True,
begin=0,
end=10,
convert_to_iter_based=True),
dict(
type='MultiStepLR',
milestones=[700],
by_epoch=True,
begin=10,
end=800,
convert_to_iter_based=True)
]
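# After the 10-epoch warmup the LR stays constant; MultiStepLR then multiplies
# it by the default gamma of 0.1 at epoch 700 for the final 100 epochs.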
# runtime
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=800)
default_hooks = dict(
# only keeps the latest 3 checkpoints
checkpoint=dict(type='CheckpointHook', interval=10, max_keep_ckpts=3))
# NOTE: `auto_scale_lr` is for automatically scaling LR
# based on the actual training batch size.
auto_scale_lr = dict(base_batch_size=2048)
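# The scaling is linear: launching with a total batch of 1024 instead of 2048
# would halve the LR (enabled via the --auto-scale-lr flag of tools/train.py).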
_base_ = [
'../_base_/datasets/imagenet_bs256_simmim_192.py',
'../_base_/default_runtime.py',
]
# model settings
model = dict(
type='SimMIM',
backbone=dict(
type='SimMIMSwinTransformer',
arch='base',
img_size=192,
stage_cfgs=dict(block_cfgs=dict(window_size=6))),
neck=dict(
type='SimMIMLinearDecoder', in_channels=128 * 2**3, encoder_stride=32),
head=dict(
type='SimMIMHead',
patch_size=4,
loss=dict(type='PixelReconstructionLoss', criterion='L1', channel=3)))
# optimizer wrapper
optim_wrapper = dict(
type='AmpOptimWrapper',
optimizer=dict(
type='AdamW',
lr=2e-4 * 2048 / 512,
betas=(0.9, 0.999),
weight_decay=0.05),
clip_grad=dict(max_norm=5.0),
paramwise_cfg=dict(
custom_keys={
'norm': dict(decay_mult=0.0),
'bias': dict(decay_mult=0.0),
'absolute_pos_embed': dict(decay_mult=0.),
'relative_position_bias_table': dict(decay_mult=0.)
}))
# learning rate scheduler
param_scheduler = [
dict(
type='LinearLR',
start_factor=1e-6 / 2e-4,
by_epoch=True,
begin=0,
end=10,
convert_to_iter_based=True),
dict(
type='CosineAnnealingLR',
T_max=90,
eta_min=1e-5 * 2048 / 512,
by_epoch=True,
begin=10,
end=100,
convert_to_iter_based=True)
]
# runtime
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=100)
default_hooks = dict(
# only keeps the latest 3 checkpoints
checkpoint=dict(type='CheckpointHook', interval=10, max_keep_ckpts=3))
# NOTE: `auto_scale_lr` is for automatically scaling LR
# based on the actual training batch size.
auto_scale_lr = dict(base_batch_size=2048)
_base_ = [
'../_base_/datasets/imagenet_bs256_simmim_192.py',
'../_base_/default_runtime.py',
]
# model settings
model = dict(
type='SimMIM',
backbone=dict(
type='SimMIMSwinTransformer',
arch='large',
img_size=192,
stage_cfgs=dict(block_cfgs=dict(window_size=12)),
pad_small_map=True),
neck=dict(
type='SimMIMLinearDecoder', in_channels=192 * 2**3, encoder_stride=32),
head=dict(
type='SimMIMHead',
patch_size=4,
loss=dict(type='PixelReconstructionLoss', criterion='L1', channel=3)))
# optimizer wrapper
optim_wrapper = dict(
type='AmpOptimWrapper',
optimizer=dict(
type='AdamW',
lr=1e-4 * 2048 / 512,
betas=(0.9, 0.999),
weight_decay=0.05),
clip_grad=dict(max_norm=5.0),
paramwise_cfg=dict(
custom_keys={
'norm': dict(decay_mult=0.0),
'bias': dict(decay_mult=0.0),
'absolute_pos_embed': dict(decay_mult=0.),
'relative_position_bias_table': dict(decay_mult=0.)
}))
# learning rate scheduler
param_scheduler = [
dict(
type='LinearLR',
start_factor=5e-7 / 1e-4,
by_epoch=True,
begin=0,
end=10,
convert_to_iter_based=True),
dict(
type='MultiStepLR',
milestones=[700],
by_epoch=True,
begin=10,
end=800,
convert_to_iter_based=True)
]
# runtime
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=800)
default_hooks = dict(
# only keeps the latest 3 checkpoints
checkpoint=dict(type='CheckpointHook', interval=10, max_keep_ckpts=3))
# NOTE: `auto_scale_lr` is for automatically scaling LR
# based on the actual training batch size.
auto_scale_lr = dict(base_batch_size=2048)
# SimSiam
> [Exploring simple siamese representation learning](https://arxiv.org/abs/2011.10566)
<!-- [ALGORITHM] -->
## Abstract
Siamese networks have become a common structure in various recent models for unsupervised visual representation learning. These models maximize the similarity between two augmentations of one image, subject to certain conditions for avoiding collapsing solutions. In this paper, we report surprising empirical results that simple Siamese networks can learn meaningful representations even using none of the following: (i) negative sample pairs, (ii) large batches, (iii) momentum encoders. Our experiments show that collapsing solutions do exist for the loss and structure, but a stop-gradient operation plays an essential role in preventing collapsing. We provide a hypothesis on the implication of stop-gradient, and further show proof-of-concept experiments verifying it. Our “SimSiam” method achieves competitive results on ImageNet and downstream tasks. We hope this simple baseline will motivate people to rethink the roles of Siamese architectures for unsupervised representation learning.
<div align=center>
<img src="https://user-images.githubusercontent.com/36138628/149724180-bc7bac6a-fcb8-421e-b8f1-9550c624d154.png" width="500" />
</div>
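The stop-gradient highlighted in the abstract is the core mechanism: each branch predicts the other branch's projection, but gradients flow only through the predictor side. Below is a minimal PyTorch sketch of the symmetric loss (illustrative only, not mmpretrain's `LatentPredictHead`; `encoder` and `predictor` are placeholder callables standing in for the backbone+projector and the prediction MLP):

```python
import torch.nn.functional as F

def simsiam_loss(encoder, predictor, x1, x2):
    """Symmetric negative cosine similarity with stop-gradient."""
    z1, z2 = encoder(x1), encoder(x2)      # projections of the two views
    p1, p2 = predictor(z1), predictor(z2)  # predictions
    # .detach() is the stop-gradient that prevents collapsed solutions
    return -0.5 * (F.cosine_similarity(p1, z2.detach(), dim=-1).mean() +
                   F.cosine_similarity(p2, z1.detach(), dim=-1).mean())
```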
## How to use it?
<!-- [TABS-BEGIN] -->
**Predict image**
```python
from mmpretrain import inference_model
predict = inference_model('resnet50_simsiam-100e-pre_8xb512-linear-coslr-90e_in1k', 'demo/bird.JPEG')
print(predict['pred_class'])
print(predict['pred_score'])
```
**Use the model**
```python
import torch
from mmpretrain import get_model
model = get_model('simsiam_resnet50_8xb32-coslr-100e_in1k', pretrained=True)
inputs = torch.rand(1, 3, 224, 224)
out = model(inputs)
print(type(out))
# To extract features.
feats = model.extract_feat(inputs)
print(type(feats))
```
**Train/Test Command**
Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset).
Train:
```shell
python tools/train.py configs/simsiam/simsiam_resnet50_8xb32-coslr-100e_in1k.py
```
Test:
```shell
python tools/test.py configs/simsiam/benchmarks/resnet50_8xb512-linear-coslr-90e_in1k.py https://download.openmmlab.com/mmselfsup/1.x/simsiam/simsiam_resnet50_8xb32-coslr-100e_in1k/resnet50_linear-8xb512-coslr-90e_in1k/resnet50_linear-8xb512-coslr-90e_in1k_20220825-f53ba400.pth
```
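For multi-GPU training, the standard distributed launcher should also work (a sketch, assuming 8 GPUs and the stock `tools/dist_train.sh` script):

```shell
bash tools/dist_train.sh configs/simsiam/simsiam_resnet50_8xb32-coslr-100e_in1k.py 8
```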
<!-- [TABS-END] -->
## Models and results
### Pretrained models
| Model | Params (M) | Flops (G) | Config | Download |
| :--------------------------------------- | :--------: | :-------: | :-------------------------------------------------: | :----------------------------------------------------------------------------------------: |
| `simsiam_resnet50_8xb32-coslr-100e_in1k` | 38.20 | 4.11 | [config](simsiam_resnet50_8xb32-coslr-100e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/simsiam/simsiam_resnet50_8xb32-coslr-100e_in1k/simsiam_resnet50_8xb32-coslr-100e_in1k_20220825-d07cb2e6.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/simsiam/simsiam_resnet50_8xb32-coslr-100e_in1k/simsiam_resnet50_8xb32-coslr-100e_in1k_20220825-d07cb2e6.json) |
| `simsiam_resnet50_8xb32-coslr-200e_in1k` | 38.20 | 4.11 | [config](simsiam_resnet50_8xb32-coslr-200e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/simsiam/simsiam_resnet50_8xb32-coslr-200e_in1k/simsiam_resnet50_8xb32-coslr-200e_in1k_20220825-efe91299.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/simsiam/simsiam_resnet50_8xb32-coslr-200e_in1k/simsiam_resnet50_8xb32-coslr-200e_in1k_20220825-efe91299.json) |
### Image Classification on ImageNet-1k
| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Config | Download |
| :---------------------------------------- | :------------------------------------------: | :--------: | :-------: | :-------: | :----------------------------------------: | :-------------------------------------------: |
| `resnet50_simsiam-100e-pre_8xb512-linear-coslr-90e_in1k` | [SIMSIAM 100-Epochs](https://download.openmmlab.com/mmselfsup/1.x/simsiam/simsiam_resnet50_8xb32-coslr-100e_in1k/simsiam_resnet50_8xb32-coslr-100e_in1k_20220825-d07cb2e6.pth) | 25.56 | 4.11 | 68.30 | [config](benchmarks/resnet50_8xb512-linear-coslr-90e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/simsiam/simsiam_resnet50_8xb32-coslr-100e_in1k/resnet50_linear-8xb512-coslr-90e_in1k/resnet50_linear-8xb512-coslr-90e_in1k_20220825-f53ba400.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/simsiam/simsiam_resnet50_8xb32-coslr-100e_in1k/resnet50_linear-8xb512-coslr-90e_in1k/resnet50_linear-8xb512-coslr-90e_in1k_20220825-f53ba400.json) |
| `resnet50_simsiam-200e-pre_8xb512-linear-coslr-90e_in1k` | [SIMSIAM 200-Epochs](https://download.openmmlab.com/mmselfsup/1.x/simsiam/simsiam_resnet50_8xb32-coslr-200e_in1k/simsiam_resnet50_8xb32-coslr-200e_in1k_20220825-efe91299.pth) | 25.56 | 4.11 | 69.80 | [config](benchmarks/resnet50_8xb512-linear-coslr-90e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/simsiam/simsiam_resnet50_8xb32-coslr-200e_in1k/resnet50_linear-8xb512-coslr-90e_in1k/resnet50_linear-8xb512-coslr-90e_in1k_20220825-519b5135.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/simsiam/simsiam_resnet50_8xb32-coslr-200e_in1k/resnet50_linear-8xb512-coslr-90e_in1k/resnet50_linear-8xb512-coslr-90e_in1k_20220825-519b5135.json) |
## Citation
```bibtex
@inproceedings{chen2021exploring,
title={Exploring simple siamese representation learning},
author={Chen, Xinlei and He, Kaiming},
booktitle={CVPR},
year={2021}
}
```
_base_ = [
'../../_base_/models/resnet50.py',
'../../_base_/datasets/imagenet_bs32_pil_resize.py',
'../../_base_/schedules/imagenet_lars_coslr_90e.py',
'../../_base_/default_runtime.py',
]
model = dict(
backbone=dict(
frozen_stages=4,
init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')))
# dataset summary
train_dataloader = dict(batch_size=512)
# runtime settings
default_hooks = dict(
checkpoint=dict(type='CheckpointHook', interval=10, max_keep_ckpts=3))
Collections:
- Name: SimSiam
Metadata:
Training Data: ImageNet-1k
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Resources: 8x V100 GPUs
Architecture:
- ResNet
Paper:
Title: Exploring simple siamese representation learning
URL: https://arxiv.org/abs/2011.10566
README: configs/simsiam/README.md
Models:
- Name: simsiam_resnet50_8xb32-coslr-100e_in1k
Metadata:
Epochs: 100
Batch Size: 256
FLOPs: 4109364224
Parameters: 38199360
Training Data: ImageNet-1k
In Collection: SimSiam
Results: null
Weights: https://download.openmmlab.com/mmselfsup/1.x/simsiam/simsiam_resnet50_8xb32-coslr-100e_in1k/simsiam_resnet50_8xb32-coslr-100e_in1k_20220825-d07cb2e6.pth
Config: configs/simsiam/simsiam_resnet50_8xb32-coslr-100e_in1k.py
Downstream:
- resnet50_simsiam-100e-pre_8xb512-linear-coslr-90e_in1k
- Name: simsiam_resnet50_8xb32-coslr-200e_in1k
Metadata:
Epochs: 200
Batch Size: 256
FLOPs: 4109364224
Parameters: 38199360
Training Data: ImageNet-1k
In Collection: SimSiam
Results: null
Weights: https://download.openmmlab.com/mmselfsup/1.x/simsiam/simsiam_resnet50_8xb32-coslr-200e_in1k/simsiam_resnet50_8xb32-coslr-200e_in1k_20220825-efe91299.pth
Config: configs/simsiam/simsiam_resnet50_8xb32-coslr-200e_in1k.py
Downstream:
- resnet50_simsiam-200e-pre_8xb512-linear-coslr-90e_in1k
- Name: resnet50_simsiam-100e-pre_8xb512-linear-coslr-90e_in1k
Metadata:
Epochs: 90
Batch Size: 4096
FLOPs: 4109464576
Parameters: 25557032
Training Data: ImageNet-1k
In Collection: SimSiam
Results:
- Task: Image Classification
Dataset: ImageNet-1k
Metrics:
Top 1 Accuracy: 68.3
Weights: https://download.openmmlab.com/mmselfsup/1.x/simsiam/simsiam_resnet50_8xb32-coslr-100e_in1k/resnet50_linear-8xb512-coslr-90e_in1k/resnet50_linear-8xb512-coslr-90e_in1k_20220825-f53ba400.pth
Config: configs/simsiam/benchmarks/resnet50_8xb512-linear-coslr-90e_in1k.py
- Name: resnet50_simsiam-200e-pre_8xb512-linear-coslr-90e_in1k
Metadata:
Epochs: 90
Batch Size: 4096
FLOPs: 4109464576
Parameters: 25557032
Training Data: ImageNet-1k
In Collection: SimSiam
Results:
- Task: Image Classification
Dataset: ImageNet-1k
Metrics:
Top 1 Accuracy: 69.8
Weights: https://download.openmmlab.com/mmselfsup/1.x/simsiam/simsiam_resnet50_8xb32-coslr-200e_in1k/resnet50_linear-8xb512-coslr-90e_in1k/resnet50_linear-8xb512-coslr-90e_in1k_20220825-519b5135.pth
Config: configs/simsiam/benchmarks/resnet50_8xb512-linear-coslr-90e_in1k.py
_base_ = [
'../_base_/datasets/imagenet_bs32_mocov2.py',
'../_base_/schedules/imagenet_sgd_coslr_200e.py',
'../_base_/default_runtime.py',
]
# model settings
model = dict(
type='SimSiam',
backbone=dict(
type='ResNet',
depth=50,
norm_cfg=dict(type='SyncBN'),
zero_init_residual=True),
neck=dict(
type='NonLinearNeck',
in_channels=2048,
hid_channels=2048,
out_channels=2048,
num_layers=3,
with_last_bn_affine=False,
with_avg_pool=True),
head=dict(
type='LatentPredictHead',
loss=dict(type='CosineSimilarityLoss'),
predictor=dict(
type='NonLinearNeck',
in_channels=2048,
hid_channels=512,
out_channels=2048,
with_avg_pool=False,
with_last_bn=False,
with_last_bias=True)),
)
# optimizer
# set base learning rate
lr = 0.05
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=lr, weight_decay=1e-4, momentum=0.9),
paramwise_cfg=dict(custom_keys={'predictor': dict(fix_lr=True)}))
# learning rate scheduler
param_scheduler = [
dict(type='CosineAnnealingLR', T_max=100, by_epoch=True, begin=0, end=100)
]
# runtime settings
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=100)
default_hooks = dict(
# only keeps the latest 3 checkpoints
checkpoint=dict(type='CheckpointHook', interval=10, max_keep_ckpts=3))
# additional hooks: SimSiamHook keeps the predictor's learning rate fixed at
# the base value for the whole run, following the paper
custom_hooks = [
dict(type='SimSiamHook', priority='HIGH', fix_pred_lr=True, lr=lr)
]
_base_ = [
'../_base_/datasets/imagenet_bs32_mocov2.py',
'../_base_/schedules/imagenet_sgd_coslr_200e.py',
'../_base_/default_runtime.py',
]
# model settings
model = dict(
type='SimSiam',
backbone=dict(
type='ResNet',
depth=50,
norm_cfg=dict(type='SyncBN'),
zero_init_residual=True),
neck=dict(
type='NonLinearNeck',
in_channels=2048,
hid_channels=2048,
out_channels=2048,
num_layers=3,
with_last_bn_affine=False,
with_avg_pool=True),
head=dict(
type='LatentPredictHead',
loss=dict(type='CosineSimilarityLoss'),
predictor=dict(
type='NonLinearNeck',
in_channels=2048,
hid_channels=512,
out_channels=2048,
with_avg_pool=False,
with_last_bn=False,
with_last_bias=True)),
)
# optimizer
# set base learning rate
lr = 0.05
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=lr, weight_decay=1e-4, momentum=0.9),
paramwise_cfg=dict(custom_keys={'predictor': dict(fix_lr=True)}))
# runtime settings
default_hooks = dict(
# only keeps the latest 3 checkpoints
checkpoint=dict(type='CheckpointHook', interval=10, max_keep_ckpts=3))
# additional hooks: SimSiamHook keeps the predictor's learning rate fixed at
# the base value for the whole run, following the paper
custom_hooks = [
dict(type='SimSiamHook', priority='HIGH', fix_pred_lr=True, lr=lr)
]
# SparK
> [Designing BERT for Convolutional Networks: Sparse and Hierarchical Masked Modeling](https://arxiv.org/abs/2301.03580)
<!-- [ALGORITHM] -->
## Abstract
We identify and overcome two key obstacles in extending the success of BERT-style pre-training, or the masked image modeling, to convolutional networks (convnets): (i) convolution operation cannot handle irregular, random-masked input images; (ii) the single-scale nature of BERT pre-training is inconsistent with convnet's hierarchical structure. For (i), we treat unmasked pixels as sparse voxels of 3D point clouds and use sparse convolution to encode. This is the first use of sparse convolution for 2D masked modeling. For (ii), we develop a hierarchical decoder to reconstruct images from multi-scale encoded features. Our method called Sparse masKed modeling (SparK) is general: it can be used directly on any convolutional model without backbone modifications. We validate it on both classical (ResNet) and modern (ConvNeXt) models: on three downstream tasks, it surpasses both state-of-the-art contrastive learning and transformer-based masked modeling by similarly large margins (around +1.0%). Improvements on object detection and instance segmentation are more substantial (up to +3.5%), verifying the strong transferability of features learned. We also find its favorable scaling behavior by observing more gains on larger models. All this evidence reveals a promising future of generative pre-training on convnets. Codes and models are released at https://github.com/keyu-tian/SparK.
<div align=center>
<img src="https://github.com/open-mmlab/mmpretrain/assets/36138628/b93e8d6f-ec1e-4f27-b986-da470fabe7df" width="80%"/>
</div>
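The key idea in the figure is that only the visible patches enter the encoder. Here is a rough sketch of the patch masking under the assumptions of this config family (224px input, 32x downsampling, mask ratio 0.6); it is illustrative, not the `SparK` class itself:

```python
import torch

B, H, W = 2, 224, 224
grid = H // 32                      # 7x7 patch grid at the last stage
mask_ratio = 0.6
num_keep = int(grid * grid * (1 - mask_ratio))

# randomly keep 40% of the patch positions per image
scores = torch.rand(B, grid * grid)
keep_idx = scores.argsort(dim=1)[:, :num_keep]
active = torch.zeros(B, grid * grid, dtype=torch.bool)
active.scatter_(1, keep_idx, True)
active = active.view(B, 1, grid, grid)  # (B, 1, 7, 7) visibility map

# a sparse convolution encoder computes only where `active` is True; the
# hierarchical decoder fills masked positions with a learned mask token and
# reconstructs the image from multi-scale features.
```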
## How to use it?
<!-- [TABS-BEGIN] -->
**Predict image**
```python
from mmpretrain import inference_model
predict = inference_model('resnet50_spark-pre_300e_in1k', 'demo/bird.JPEG')
print(predict['pred_class'])
print(predict['pred_score'])
```
**Use the model**
```python
import torch
from mmpretrain import get_model
model = get_model('spark_sparse-resnet50_800e_in1k', pretrained=True)
inputs = torch.rand(1, 3, 224, 224)
out = model(inputs)
print(type(out))
# To extract features.
feats = model.extract_feat(inputs)
print(type(feats))
```
**Train/Test Command**
Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset).
Train:
```shell
python tools/train.py configs/spark/spark_sparse-resnet50_8xb512-amp-coslr-800e_in1k.py
```
Test:
```shell
python tools/test.py configs/spark/benchmarks/resnet50_8xb256-coslr-300e_in1k.py https://download.openmmlab.com/mmpretrain/v1.0/spark/spark_sparse-resnet50_8xb512-amp-coslr-800e_in1k/resnet50_8xb256-coslr-300e_in1k/resnet50_8xb256-coslr-300e_in1k_20230612-f86aab51.pth
```
<!-- [TABS-END] -->
## Models and results
### Pretrained models
| Model | Params (M) | Flops (G) | Config | Download |
| :--------------------------------------- | :--------: | :-------: | :-------------------------------------------------------------------: | :----------------------------------------------------------------------: |
| `spark_sparse-resnet50_800e_in1k` | 37.97 | 4.10 | [config](spark_sparse-resnet50_8xb512-amp-coslr-800e_in1k.py) | [model](https://download.openmmlab.com/mmpretrain/v1.0/spark/spark_sparse-resnet50_8xb512-amp-coslr-800e_in1k/spark_sparse-resnet50_8xb512-amp-coslr-800e_in1k_20230612-e403c28f.pth) \| [log](https://download.openmmlab.com/mmpretrain/v1.0/spark/spark_sparse-resnet50_8xb512-amp-coslr-800e_in1k/spark_sparse-resnet50_8xb512-amp-coslr-800e_in1k_20230612-e403c28f.json) |
| `spark_sparse-convnextv2-tiny_800e_in1k` | 39.73 | 4.47 | [config](spark_sparse-convnextv2-tiny_16xb256-amp-coslr-800e_in1k.py) | [model](https://download.openmmlab.com/mmpretrain/v1.0/spark/spark_sparse-convnextv2-tiny_16xb256-amp-coslr-800e_in1k/spark_sparse-convnextv2-tiny_16xb256-amp-coslr-800e_in1k_20230612-b0ea712e.pth) \| [log](https://download.openmmlab.com/mmpretrain/v1.0/spark/spark_sparse-convnextv2-tiny_16xb256-amp-coslr-800e_in1k/spark_sparse-convnextv2-tiny_16xb256-amp-coslr-800e_in1k_20230612-b0ea712e.json) |
### Image Classification on ImageNet-1k
| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download |
| :------------------------------------ | :----------------------------------------: | :--------: | :-------: | :-------: | :-------: | :---------------------------------------: | :-----------------------------------------: |
| `resnet50_spark-pre_300e_in1k` | [SPARK](https://download.openmmlab.com/mmpretrain/v1.0/spark/spark_sparse-resnet50_8xb512-amp-coslr-800e_in1k/spark_sparse-resnet50_8xb512-amp-coslr-800e_in1k_20230612-e403c28f.pth) | 23.52 | 1.31 | 80.10 | 94.90 | [config](benchmarks/resnet50_8xb256-coslr-300e_in1k.py) | [model](https://download.openmmlab.com/mmpretrain/v1.0/spark/spark_sparse-resnet50_8xb512-amp-coslr-800e_in1k/resnet50_8xb256-coslr-300e_in1k/resnet50_8xb256-coslr-300e_in1k_20230612-f86aab51.pth) \| [log](https://download.openmmlab.com/mmpretrain/v1.0/spark/spark_sparse-resnet50_8xb512-amp-coslr-800e_in1k/resnet50_8xb256-coslr-300e_in1k/resnet50_8xb256-coslr-300e_in1k_20230612-f86aab51.json) |
| `convnextv2-tiny_spark-pre_300e_in1k` | [SPARK](https://download.openmmlab.com/mmpretrain/v1.0/spark/spark_sparse-convnextv2-tiny_16xb256-amp-coslr-800e_in1k/spark_sparse-convnextv2-tiny_16xb256-amp-coslr-800e_in1k_20230612-b0ea712e.pth) | 28.64 | 4.47 | 82.80 | 96.30 | [config](benchmarks/convnextv2-tiny_8xb256-coslr-300e_in1k.py) | [model](https://download.openmmlab.com/mmpretrain/v1.0/spark//spark_sparse-convnextv2-tiny_16xb256-amp-coslr-800e_in1k/convnextv2-tiny_8xb256-coslr-300e_in1k/convnextv2-tiny_8xb256-coslr-300e_in1k_20230612-ffc78743.pth) \| [log](https://download.openmmlab.com/mmpretrain/v1.0/spark//spark_sparse-convnextv2-tiny_16xb256-amp-coslr-800e_in1k/convnextv2-tiny_8xb256-coslr-300e_in1k/convnextv2-tiny_8xb256-coslr-300e_in1k_20230612-ffc78743.json) |
## Citation
```bibtex
@Article{tian2023designing,
author = {Keyu Tian and Yi Jiang and Qishuai Diao and Chen Lin and Liwei Wang and Zehuan Yuan},
title = {Designing BERT for Convolutional Networks: Sparse and Hierarchical Masked Modeling},
journal = {arXiv:2301.03580},
year = {2023},
}
```
_base_ = [
'../../_base_/datasets/imagenet_bs64_swin_224.py',
'../../_base_/default_runtime.py',
]
data_preprocessor = dict(
num_classes=1000,
# RGB format normalization parameters
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
# convert image from BGR to RGB
to_rgb=True,
)
bgr_mean = data_preprocessor['mean'][::-1]
bgr_std = data_preprocessor['std'][::-1]
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='RandomResizedCrop',
scale=224,
backend='pillow',
interpolation='bicubic'),
dict(type='RandomFlip', prob=0.5, direction='horizontal'),
dict(type='NumpyToPIL', to_rgb=True),
dict(
type='torchvision/TrivialAugmentWide',
num_magnitude_bins=31,
interpolation='bicubic',
fill=None),
dict(type='PILToNumpy', to_bgr=True),
dict(
type='RandomErasing',
erase_prob=0.25,
mode='rand',
min_area_ratio=0.02,
max_area_ratio=1 / 3,
fill_color=bgr_mean,
fill_std=bgr_std),
dict(type='PackInputs'),
]
train_dataloader = dict(
dataset=dict(pipeline=train_pipeline),
sampler=dict(type='RepeatAugSampler', shuffle=True),
)
# Model settings
model = dict(
type='ImageClassifier',
backbone=dict(
type='ConvNeXt',
arch='tiny',
drop_path_rate=0.1,
layer_scale_init_value=0.,
use_grn=True,
init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
head=dict(
type='LinearClsHead',
num_classes=1000,
in_channels=768,
loss=dict(
type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
init_cfg=dict(type='TruncNormal', layer='Linear', std=.02, bias=0.),
),
train_cfg=dict(augments=[
dict(type='Mixup', alpha=0.8),
dict(type='CutMix', alpha=1.0),
]),
)
custom_hooks = [
dict(
type='EMAHook',
momentum=1e-4,
evaluate_on_origin=True,
priority='ABOVE_NORMAL')
]
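# EMAHook keeps an exponential moving average of the weights, updated each
# iteration as ema = (1 - 1e-4) * ema + 1e-4 * w; evaluate_on_origin=True
# additionally validates the original (non-averaged) weights.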
# schedule settings
# optimizer
optim_wrapper = dict(
optimizer=dict(
type='AdamW', lr=3.2e-3, betas=(0.9, 0.999), weight_decay=0.05),
constructor='LearningRateDecayOptimWrapperConstructor',
paramwise_cfg=dict(
layer_decay_rate=0.7,
norm_decay_mult=0.0,
bias_decay_mult=0.0,
flat_decay_mult=0.0))
# learning policy
param_scheduler = [
# warm up learning rate scheduler
dict(
type='LinearLR',
start_factor=0.0001,
by_epoch=True,
begin=0,
end=20,
convert_to_iter_based=True),
# main learning rate scheduler
dict(
type='CosineAnnealingLR',
T_max=280,
eta_min=1.0e-5,
by_epoch=True,
begin=20,
end=300)
]
train_cfg = dict(by_epoch=True, max_epochs=300)
val_cfg = dict()
test_cfg = dict()
default_hooks = dict(
# only keeps the latest 2 checkpoints
checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=2))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# based on the actual training batch size.
auto_scale_lr = dict(base_batch_size=2048)
_base_ = [
'../../_base_/models/resnet50.py',
'../../_base_/datasets/imagenet_bs256_rsb_a12.py',
'../../_base_/default_runtime.py'
]
# modifications are based on the ResNet RSB settings
data_preprocessor = dict(
num_classes=1000,
# RGB format normalization parameters
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
# convert image from BGR to RGB
to_rgb=True,
)
bgr_mean = data_preprocessor['mean'][::-1]
bgr_std = data_preprocessor['std'][::-1]
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='RandomResizedCrop',
scale=224,
backend='pillow',
interpolation='bicubic'),
dict(type='RandomFlip', prob=0.5, direction='horizontal'),
dict(type='NumpyToPIL', to_rgb=True),
dict(
type='torchvision/TrivialAugmentWide',
num_magnitude_bins=31,
interpolation='bicubic',
fill=None),
dict(type='PILToNumpy', to_bgr=True),
dict(
type='RandomErasing',
erase_prob=0.25,
mode='rand',
min_area_ratio=0.02,
max_area_ratio=1 / 3,
fill_color=bgr_mean,
fill_std=bgr_std),
dict(type='PackInputs'),
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# model settings
model = dict(
backbone=dict(
norm_cfg=dict(type='SyncBN', requires_grad=True),
drop_path_rate=0.05,
init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
head=dict(
loss=dict(
type='LabelSmoothLoss', label_smooth_val=0.1, use_sigmoid=True)),
train_cfg=dict(augments=[
dict(type='Mixup', alpha=0.1),
dict(type='CutMix', alpha=1.0)
]))
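# use_sigmoid=True switches the smoothed loss to the binary-cross-entropy
# formulation of the RSB recipes, which handles the soft multi-label targets
# produced by Mixup/CutMix.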
# schedule settings
# optimizer
optim_wrapper = dict(
optimizer=dict(
type='Lamb',
lr=0.016,
weight_decay=0.02,
),
constructor='LearningRateDecayOptimWrapperConstructor',
paramwise_cfg=dict(
layer_decay_rate=0.7,
norm_decay_mult=0.0,
bias_decay_mult=0.0,
flat_decay_mult=0.0))
# learning policy
param_scheduler = [
# warm up learning rate scheduler
dict(
type='LinearLR',
start_factor=0.0001,
by_epoch=True,
begin=0,
end=5,
# update by iter
convert_to_iter_based=True),
# main learning rate scheduler
dict(
type='CosineAnnealingLR',
T_max=295,
eta_min=1.0e-6,
by_epoch=True,
begin=5,
end=300)
]
train_cfg = dict(by_epoch=True, max_epochs=300)
val_cfg = dict()
test_cfg = dict()
default_hooks = dict(
# only keeps the latest 2 checkpoints
checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=2))
# randomness
randomness = dict(seed=0, diff_rank_seed=True)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# based on the actual training batch size.
auto_scale_lr = dict(base_batch_size=2048)
Collections:
- Name: SparK
Metadata:
Architecture:
- Dense Connections
- GELU
- Layer Normalization
- Multi-Head Attention
- Scaled Dot-Product Attention
Paper:
Title: 'Designing BERT for Convolutional Networks: Sparse and Hierarchical Masked Modeling'
URL: https://arxiv.org/abs/2301.03580
README: configs/spark/README.md
Code:
URL: null
Version: null
Models:
- Name: spark_sparse-resnet50_800e_in1k
Metadata:
FLOPs: 4100000000
Parameters: 37971000
Training Data:
- ImageNet-1k
In Collection: SparK
Results: null
Weights: https://download.openmmlab.com/mmpretrain/v1.0/spark/spark_sparse-resnet50_8xb512-amp-coslr-800e_in1k/spark_sparse-resnet50_8xb512-amp-coslr-800e_in1k_20230612-e403c28f.pth
Config: configs/spark/spark_sparse-resnet50_8xb512-amp-coslr-800e_in1k.py
Downstream:
- resnet50_spark-pre_300e_in1k
- Name: resnet50_spark-pre_300e_in1k
Metadata:
FLOPs: 1310000000
Parameters: 23520000
Training Data:
- ImageNet-1k
In Collection: SparK
Results:
- Dataset: ImageNet-1k
Metrics:
Top 1 Accuracy: 80.1
Top 5 Accuracy: 94.9
Task: Image Classification
Weights: https://download.openmmlab.com/mmpretrain/v1.0/spark/spark_sparse-resnet50_8xb512-amp-coslr-800e_in1k/resnet50_8xb256-coslr-300e_in1k/resnet50_8xb256-coslr-300e_in1k_20230612-f86aab51.pth
Config: configs/spark/benchmarks/resnet50_8xb256-coslr-300e_in1k.py
- Name: spark_sparse-convnextv2-tiny_800e_in1k
Metadata:
FLOPs: 4470000000
Parameters: 39732000
Training Data:
- ImageNet-1k
In Collection: SparK
Results: null
Weights: https://download.openmmlab.com/mmpretrain/v1.0/spark/spark_sparse-convnextv2-tiny_16xb256-amp-coslr-800e_in1k/spark_sparse-convnextv2-tiny_16xb256-amp-coslr-800e_in1k_20230612-b0ea712e.pth
Config: configs/spark/spark_sparse-convnextv2-tiny_16xb256-amp-coslr-800e_in1k.py
Downstream:
- convnextv2-tiny_spark-pre_300e_in1k
- Name: convnextv2-tiny_spark-pre_300e_in1k
Metadata:
FLOPs: 4469631744
Parameters: 28635496
Training Data:
- ImageNet-1k
In Collection: SparK
Results:
- Dataset: ImageNet-1k
Metrics:
Top 1 Accuracy: 82.8
Top 5 Accuracy: 96.3
Task: Image Classification
Weights: https://download.openmmlab.com/mmpretrain/v1.0/spark//spark_sparse-convnextv2-tiny_16xb256-amp-coslr-800e_in1k/convnextv2-tiny_8xb256-coslr-300e_in1k/convnextv2-tiny_8xb256-coslr-300e_in1k_20230612-ffc78743.pth
Config: configs/spark/benchmarks/convnextv2-tiny_8xb256-coslr-300e_in1k.py
_base_ = [
'../_base_/datasets/imagenet_bs512_mae.py',
'../_base_/default_runtime.py',
]
# dataset 16 x 256
train_dataloader = dict(batch_size=256, num_workers=8)
# model settings
model = dict(
type='SparK',
input_size=224,
downsample_raito=32,
mask_ratio=0.6,
enc_dec_norm_cfg=dict(type='SparseLN2d', eps=1e-6),
enc_dec_norm_dim=768,
backbone=dict(
type='SparseConvNeXt',
arch='small',
drop_path_rate=0.2,
out_indices=(0, 1, 2, 3),
gap_before_output=False),
neck=dict(
type='SparKLightDecoder',
feature_dim=512,
upsample_ratio=32, # equal to downsample_raito
mid_channels=0,
last_act=False),
head=dict(
type='SparKPretrainHead',
loss=dict(type='PixelReconstructionLoss', criterion='L2')))
# optimizer wrapper
optimizer = dict(
type='Lamb', lr=2e-4 * 4096 / 512, betas=(0.9, 0.95), weight_decay=0.04)
optim_wrapper = dict(
type='AmpOptimWrapper',
optimizer=optimizer,
clip_grad=dict(max_norm=5.0),
paramwise_cfg=dict(
bias_decay_mult=0.0,
flat_decay_mult=0.0,
custom_keys={
'mask_token': dict(decay_mult=0.),
}))
# learning rate scheduler
param_scheduler = [
dict(
type='LinearLR',
start_factor=1e-4,
by_epoch=True,
begin=0,
end=40,
convert_to_iter_based=True),
dict(
type='CosineAnnealingLR',
T_max=760,
by_epoch=True,
begin=40,
end=800,
convert_to_iter_based=True),
dict(
type='CosineAnnealingWeightDecay',
eta_min=0.2,
T_max=800,
by_epoch=True,
begin=0,
end=800,
convert_to_iter_based=True)
]
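# The third scheduler anneals weight decay itself along a cosine curve, from
# the initial 0.04 up to eta_min = 0.2 over the full 800 epochs, i.e. weight
# decay grows as pretraining progresses.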
# runtime settings
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=800)
default_hooks = dict(
logger=dict(type='LoggerHook', interval=100),
# only keeps the latest 2 checkpoints
checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=2))
# randomness
randomness = dict(seed=0, diff_rank_seed=True)
_base_ = [
'../_base_/datasets/imagenet_bs512_mae.py',
'../_base_/default_runtime.py',
]
# dataset 16 x 256
train_dataloader = dict(batch_size=256, num_workers=8)
# model settings, use ConvNeXt V2
model = dict(
type='SparK',
input_size=224,
downsample_raito=32,
mask_ratio=0.6,
enc_dec_norm_cfg=dict(type='SparseLN2d', eps=1e-6),
enc_dec_norm_dim=768,
backbone=dict(
type='SparseConvNeXt',
arch='tiny',
drop_path_rate=0.2,
out_indices=(0, 1, 2, 3),
gap_before_output=False,
layer_scale_init_value=0.,
use_grn=True,
),
neck=dict(
type='SparKLightDecoder',
feature_dim=512,
upsample_ratio=32, # equal to downsample_raito
mid_channels=0,
last_act=False),
head=dict(
type='SparKPretrainHead',
loss=dict(type='PixelReconstructionLoss', criterion='L2')))
# optimizer wrapper
optimizer = dict(
type='Lamb', lr=2e-4 * 4096 / 512, betas=(0.9, 0.95), weight_decay=0.04)
optim_wrapper = dict(
type='AmpOptimWrapper',
optimizer=optimizer,
clip_grad=dict(max_norm=5.0),
paramwise_cfg=dict(
bias_decay_mult=0.0,
flat_decay_mult=0.0,
custom_keys={
'mask_token': dict(decay_mult=0.),
}))
# learning rate scheduler
param_scheduler = [
dict(
type='LinearLR',
start_factor=1e-4,
by_epoch=True,
begin=0,
end=20,
convert_to_iter_based=True),
dict(
type='CosineAnnealingLR',
T_max=780,
by_epoch=True,
begin=20,
end=800,
convert_to_iter_based=True),
dict(
type='CosineAnnealingWeightDecay',
eta_min=0.2,
T_max=800,
by_epoch=True,
begin=0,
end=800,
convert_to_iter_based=True)
]
# runtime settings
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=800)
default_hooks = dict(
logger=dict(type='LoggerHook', interval=100),
# only keeps the latest 2 checkpoints
checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=2))
# randomness
randomness = dict(seed=0, diff_rank_seed=True)
_base_ = 'spark_sparse-resnet50_8xb512-amp-coslr-800e_in1k.py'
# learning rate scheduler
param_scheduler = [
dict(
type='LinearLR',
start_factor=1e-4,
by_epoch=True,
begin=0,
end=40,
convert_to_iter_based=True),
dict(
type='CosineAnnealingLR',
T_max=1560,
by_epoch=True,
begin=40,
end=1600,
convert_to_iter_based=True),
dict(
type='CosineAnnealingWeightDecay',
eta_min=0.2,
T_max=1600,
by_epoch=True,
begin=0,
end=1600,
convert_to_iter_based=True)
]
# runtime settings
train_cfg = dict(max_epochs=1600)
_base_ = [
'../_base_/datasets/imagenet_bs512_mae.py',
'../_base_/default_runtime.py',
]
# dataset 8 x 512
train_dataloader = dict(batch_size=512, num_workers=8)
# model settings
model = dict(
type='SparK',
input_size=224,
downsample_raito=32,
mask_ratio=0.6,
enc_dec_norm_cfg=dict(type='SparseSyncBatchNorm2d'),
enc_dec_norm_dim=2048,
backbone=dict(
type='SparseResNet',
depth=50,
out_indices=(0, 1, 2, 3),
drop_path_rate=0.05),
neck=dict(
type='SparKLightDecoder',
feature_dim=512,
upsample_ratio=32, # equal to downsample_raito
mid_channels=0,
last_act=False),
head=dict(
type='SparKPretrainHead',
loss=dict(type='PixelReconstructionLoss', criterion='L2')))
# optimizer wrapper
optimizer = dict(
type='Lamb', lr=2e-4 * 4096 / 512, betas=(0.9, 0.95), weight_decay=0.04)
optim_wrapper = dict(
type='AmpOptimWrapper',
optimizer=optimizer,
clip_grad=dict(max_norm=5.0),
paramwise_cfg=dict(
bias_decay_mult=0.0,
flat_decay_mult=0.0,
custom_keys={
'mask_token': dict(decay_mult=0.),
}))
# learning rate scheduler
param_scheduler = [
dict(
type='LinearLR',
start_factor=1e-4,
by_epoch=True,
begin=0,
end=40,
convert_to_iter_based=True),
dict(
type='CosineAnnealingLR',
T_max=760,
by_epoch=True,
begin=40,
end=800,
convert_to_iter_based=True),
dict(
type='CosineAnnealingWeightDecay',
eta_min=0.2,
T_max=800,
by_epoch=True,
begin=0,
end=800,
convert_to_iter_based=True)
]
# runtime settings
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=800)
default_hooks = dict(
logger=dict(type='LoggerHook', interval=100),
# only keeps the latest 2 checkpoints
checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=2))
# randomness
randomness = dict(seed=0, diff_rank_seed=True)