Commit 495d9ed9 authored by limm's avatar limm
Browse files

add part code

parent 59b09903
Pipeline #2799 canceled with stages
# BLIP fine-tuning config for the OK-VQA visual question answering task.
# Inherits the dataset pipeline and runtime defaults from the _base_ files.
_base_ = [
    '../_base_/datasets/coco_okvqa.py',
    '../_base_/default_runtime.py',
]

# model settings
model = dict(
    type='BlipVQA',
    tokenizer=dict(type='BlipTokenizer', name_or_path='bert-base-uncased'),
    # ViT-B/16 image encoder at 480x480 input; 'raw' keeps the full token
    # sequence (no pooling) for the multimodal encoder.
    vision_backbone=dict(
        type='VisionTransformer',
        arch='b',
        img_size=480,
        patch_size=16,
        out_type='raw'),
    # BERT-base "MED" encoder with cross-attention over image tokens.
    multimodal_backbone=dict(
        type='XBertEncoder',
        med_config=dict(
            architectures=['BertModel'],
            attention_probs_dropout_prob=0.1,
            hidden_act='gelu',
            hidden_dropout_prob=0.1,
            hidden_size=768,
            initializer_range=0.02,
            intermediate_size=3072,
            layer_norm_eps=1e-12,
            max_position_embeddings=512,
            model_type='bert',
            num_attention_heads=12,
            num_hidden_layers=12,
            pad_token_id=0,
            add_type_embeddings=False,
            vocab_size=30524,
            encoder_width=768,
            add_cross_attention=True),
    ),
    # Answers are produced by free-form generation with a BERT LM decoder.
    head=dict(
        type='VQAGenerationHead',
        decoder=dict(
            type='XBertLMHeadDecoder',
            med_config=dict(
                architectures=['BertModel'],
                attention_probs_dropout_prob=0.1,
                hidden_act='gelu',
                hidden_dropout_prob=0.1,
                hidden_size=768,
                initializer_range=0.02,
                intermediate_size=3072,
                layer_norm_eps=1e-12,
                max_position_embeddings=512,
                model_type='bert',
                num_attention_heads=12,
                num_hidden_layers=12,
                pad_token_id=0,
                add_type_embeddings=False,
                vocab_size=30524,
                encoder_width=768,
                add_cross_attention=True),
        ),
        inference_method='generate',
    ),
)

# schedule settings
optimizer = dict(type='AdamW', lr=2e-5, weight_decay=0.05)
optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer)
param_scheduler = [dict(type='CosineAnnealingLR', by_epoch=True)]
train_cfg = dict(max_epochs=10, by_epoch=True)
val_cfg = dict()
test_cfg = dict()

# runtime settings
randomness = dict(seed=42)
# BLIP fine-tuning config for image-text retrieval on COCO.
_base_ = [
    '../_base_/datasets/coco_retrieval.py',
    '../_base_/default_runtime.py',
]

# model settings
model = dict(
    type='BlipRetrieval',
    tokenizer=dict(type='BlipTokenizer', name_or_path='bert-base-uncased'),
    # ViT-B/16 image encoder at 384x384; 'raw' keeps all patch tokens.
    vision_backbone=dict(
        type='VisionTransformer',
        arch='b',
        img_size=384,
        patch_size=16,
        out_type='raw',
    ),
    # BERT-base text encoder with cross-attention for image-text matching.
    text_backbone=dict(
        type='XBertEncoder',
        med_config=dict(
            architectures=['BertModel'],
            attention_probs_dropout_prob=0.1,
            hidden_act='gelu',
            hidden_dropout_prob=0.1,
            hidden_size=768,
            initializer_range=0.02,
            intermediate_size=3072,
            layer_norm_eps=1e-12,
            max_position_embeddings=512,
            model_type='bert',
            num_attention_heads=12,
            num_hidden_layers=12,
            pad_token_id=0,
            add_type_embeddings=False,
            vocab_size=30524,
            encoder_width=768,
            add_cross_attention=True),
    ),
    # Project both modalities into a shared 256-d embedding space for ITC.
    vision_neck=dict(
        type='Linear',
        in_features=768,
        out_features=256,
    ),
    text_neck=dict(
        type='Linear',
        in_features=768,
        out_features=256,
    ),
    head=dict(
        type='ITCHead',
        embed_dim=256,
    ),
    # Image-text matching head re-ranks the contrastive candidates.
    multimodal_head=dict(
        type='ITMHead',
        hidden_size=768,
        with_pooler=False,
    ),
    topk=256,  # number of ITC candidates re-ranked by the ITM head
    max_txt_len=35,
)

# optimizer
optimizer = dict(type='AdamW', lr=2e-5, weight_decay=0.04)
optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer)

# learning rate scheduler
param_scheduler = [dict(type='CosineAnnealingLR', by_epoch=True)]

# runtime settings
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=6)
val_cfg = dict(type='RetrievalValLoop')
test_cfg = dict(type='RetrievalTestLoop')
randomness = dict(seed=42)
default_hooks = dict(logger=dict(interval=1))
# Ramp up the 'alpha' parameter of the head over the first two epochs.
custom_hooks = [
    dict(
        type='WarmupParamHook',
        param_name='alpha',
        module_name='head',
        warmup_epochs=2)
]
# BLIP fine-tuning config for image-text retrieval on Flickr30k.
# Identical to the COCO retrieval config except for the dataset base file.
_base_ = [
    '../_base_/datasets/flickr30k_retrieval.py',
    '../_base_/default_runtime.py',
]

# model settings
model = dict(
    type='BlipRetrieval',
    tokenizer=dict(type='BlipTokenizer', name_or_path='bert-base-uncased'),
    # ViT-B/16 image encoder at 384x384; 'raw' keeps all patch tokens.
    vision_backbone=dict(
        type='VisionTransformer',
        arch='b',
        img_size=384,
        patch_size=16,
        out_type='raw',
    ),
    # BERT-base text encoder with cross-attention for image-text matching.
    text_backbone=dict(
        type='XBertEncoder',
        med_config=dict(
            architectures=['BertModel'],
            attention_probs_dropout_prob=0.1,
            hidden_act='gelu',
            hidden_dropout_prob=0.1,
            hidden_size=768,
            initializer_range=0.02,
            intermediate_size=3072,
            layer_norm_eps=1e-12,
            max_position_embeddings=512,
            model_type='bert',
            num_attention_heads=12,
            num_hidden_layers=12,
            pad_token_id=0,
            add_type_embeddings=False,
            vocab_size=30524,
            encoder_width=768,
            add_cross_attention=True),
    ),
    # Project both modalities into a shared 256-d embedding space for ITC.
    vision_neck=dict(
        type='Linear',
        in_features=768,
        out_features=256,
    ),
    text_neck=dict(
        type='Linear',
        in_features=768,
        out_features=256,
    ),
    head=dict(
        type='ITCHead',
        embed_dim=256,
    ),
    # Image-text matching head re-ranks the contrastive candidates.
    multimodal_head=dict(
        type='ITMHead',
        hidden_size=768,
        with_pooler=False,
    ),
    topk=256,  # number of ITC candidates re-ranked by the ITM head
    max_txt_len=35,
)

# optimizer
optimizer = dict(type='AdamW', lr=2e-5, weight_decay=0.04)
optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer)

# learning rate scheduler
param_scheduler = [dict(type='CosineAnnealingLR', by_epoch=True)]

# runtime settings
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=6)
val_cfg = dict(type='RetrievalValLoop')
test_cfg = dict(type='RetrievalTestLoop')
randomness = dict(seed=42)
default_hooks = dict(logger=dict(interval=1))
# Ramp up the 'alpha' parameter of the head over the first two epochs.
custom_hooks = [
    dict(
        type='WarmupParamHook',
        param_name='alpha',
        module_name='head',
        warmup_epochs=2)
]
# BLIP fine-tuning config for VQA (COCO + Visual Genome question pairs).
_base_ = [
    '../_base_/datasets/coco_vg_vqa.py',
    '../_base_/default_runtime.py',
]

# model settings
model = dict(
    type='BlipVQA',
    tokenizer=dict(type='BlipTokenizer', name_or_path='bert-base-uncased'),
    # ViT-B/16 image encoder at 480x480; 'raw' keeps the full token sequence.
    vision_backbone=dict(
        type='VisionTransformer',
        arch='b',
        img_size=480,
        patch_size=16,
        out_type='raw'),
    # BERT-base "MED" encoder with cross-attention over image tokens.
    multimodal_backbone=dict(
        type='XBertEncoder',
        med_config=dict(
            architectures=['BertModel'],
            attention_probs_dropout_prob=0.1,
            hidden_act='gelu',
            hidden_dropout_prob=0.1,
            hidden_size=768,
            initializer_range=0.02,
            intermediate_size=3072,
            layer_norm_eps=1e-12,
            max_position_embeddings=512,
            model_type='bert',
            num_attention_heads=12,
            num_hidden_layers=12,
            pad_token_id=0,
            add_type_embeddings=False,
            vocab_size=30524,
            encoder_width=768,
            add_cross_attention=True),
    ),
    head=dict(
        type='VQAGenerationHead',
        decoder=dict(
            type='XBertLMHeadDecoder',
            med_config=dict(
                architectures=['BertModel'],
                attention_probs_dropout_prob=0.1,
                hidden_act='gelu',
                hidden_dropout_prob=0.1,
                hidden_size=768,
                initializer_range=0.02,
                intermediate_size=3072,
                layer_norm_eps=1e-12,
                max_position_embeddings=512,
                model_type='bert',
                num_attention_heads=12,
                num_hidden_layers=12,
                pad_token_id=0,
                add_type_embeddings=False,
                vocab_size=30524,
                encoder_width=768,
                add_cross_attention=True),
        ),
        # 'rank' scores a closed answer list instead of free generation.
        inference_method='rank',  # or 'generate'
        answer_list_path=
        'https://storage.googleapis.com/sfr-vision-language-research/datasets/answer_list.json',  # noqa: E501
    ),
)

# schedule settings
optimizer = dict(type='AdamW', lr=2e-5, weight_decay=0.05)
optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer)
param_scheduler = [dict(type='CosineAnnealingLR', by_epoch=True)]
train_cfg = dict(max_epochs=10, by_epoch=True)
# NOTE(review): unlike the OK-VQA config there is no val_cfg here —
# presumably the VQA test server split is evaluated offline; confirm.
test_cfg = dict()

# runtime settings
randomness = dict(seed=42)
Collections:
- Name: BLIP
Metadata:
Training Data:
- COCO
- VG
- Conceptual Captions
- Conceptual 12M
- SBU captions
Architecture:
- Transformer
Training Resources: 8x A100 GPUs
Paper:
Title: 'BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language
Understanding and Generation'
URL: https://arxiv.org/abs/2201.12086
README: configs/blip/README.md
Models:
- Name: blip-base_8xb16_refcoco
Metadata:
FLOPs: null
Parameters: 498488636
In Collection: BLIP
Results:
- Task: Visual Grounding
Dataset: RefCOCO
Metrics:
Accuracy (testA): 86.14
Accuracy (testB): 77.33
Weights: https://download.openmmlab.com/mmclassification/v1/blip/blip-base_8xb16_refcoco_20230508-d2d10f4c.pth
Config: configs/blip/blip-base_8xb16_refcoco.py
- Name: blip-base_3rdparty_caption
Metadata:
FLOPs: null
Parameters: 223971644
In Collection: BLIP
Results:
- Dataset: COCO
Task: Image Caption
Metrics:
BLEU-4: 40.12
CIDER: 132.82
Weights: https://download.openmmlab.com/mmclassification/v1/blip/blip-base_3rdparty_coco-caption_20230419-a5b71af3.pth
Config: configs/blip/blip-base_8xb32_caption.py
Converted From:
Weights: https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP/blip_coco_caption_base.pth
Code: https://github.com/salesforce/LAVIS
- Name: blip-base_3rdparty_nlvr
Metadata:
FLOPs: null
Parameters: 259372034
In Collection: BLIP
Results:
- Task: NLVR
Dataset: NLVR2
Metrics:
Top 1 Accuracy: 82.33
Weights: https://download.openmmlab.com/mmclassification/v1/blip/blip-base_3rdparty_nlvr_20230427-3b14d33f.pth
Config: configs/blip/blip-base_8xb32_nlvr.py
Converted From:
Weights: https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_nlvr.pth
Code: https://github.com/salesforce/LAVIS
- Name: blip-base_3rdparty_vqa
Metadata:
FLOPs: null
Parameters: 361478972
In Collection: BLIP
Results:
- Task: Visual Question Answering
Dataset: VQAv2
Metrics:
Accuracy: 78.2
Weights: https://download.openmmlab.com/mmclassification/v1/blip/blip-base_3rdparty-capflit_vqa_20230505-81488941.pth
Config: configs/blip/blip-base_8xb32_vqa.py
Converted From:
Weights: https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth
Code: https://github.com/salesforce/LAVIS
- Name: blip-base_3rdparty_retrieval
Metadata:
FLOPs: null
Parameters: 447486979
In Collection: BLIP
Results:
- Task: Image-To-Text Retrieval
Dataset: COCO
Metrics:
Recall@1: 82.52
Recall@5: 95.34
- Task: Text-To-Image Retrieval
Dataset: COCO
Metrics:
Recall@1: 64.82
Recall@5: 86.28
Weights: https://download.openmmlab.com/mmclassification/v1/blip/blip-base_3rdparty_coco-retrieval_20230419-a1804d2c.pth
Config: configs/blip/blip-base_8xb32_retrieval.py
Converted From:
Weights: https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP/blip_coco_retrieval.pth
Code: https://github.com/salesforce/LAVIS
# BLIP-2
> [BLIP-2: Bootstrapping Language-Image Pre-training with Frozen Image Encoders and Large Language Models](http://arxiv.org/abs/2301.12597)
<!-- [ALGORITHM] -->
## Abstract
The cost of vision-and-language pre-training has become increasingly prohibitive due to end-to-end training of large-scale models. This paper proposes BLIP-2, a generic and efficient pre-training strategy that bootstraps vision-language pre-training from off-the-shelf frozen pre-trained image encoders and frozen large language models. BLIP-2 bridges the modality gap with a lightweight Querying Transformer, which is pretrained in two stages. The first stage bootstraps vision-language representation learning from a frozen image encoder. The second stage bootstraps vision-to-language generative learning from a frozen language model. BLIP-2 achieves state-of-the-art performance on various vision-language tasks, despite having significantly fewer trainable parameters than existing methods. For example, our model outperforms Flamingo80B by 8.7% on zero-shot VQAv2 with 54x fewer trainable parameters. We also demonstrate the model’s emerging capabilities of zero-shot image-to-text generation that can follow natural language instructions.
<div align=center>
<img src="https://user-images.githubusercontent.com/30762564/236385045-dc22a621-0a9c-4352-afa4-ca3888044850.png" width="70%"/>
</div>
## How to use it?
<!-- [TABS-BEGIN] -->
**Use the model**
```python
from mmpretrain import inference_model
result = inference_model('blip2-opt2.7b_3rdparty-zeroshot_caption', 'demo/cat-dog.png')
print(result)
# {'pred_caption': 'a dog and a cat sitting on a blanket'}
```
**Test Command**
Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset).
Test:
```shell
python tools/test.py configs/blip2/blip2_8xb32_retrieval.py https://download.openmmlab.com/mmclassification/v1/blip2/blip2_3rdparty_pretrain_20230505-f7ef4390.pth
```
<!-- [TABS-END] -->
## Models and results
### Image Caption on COCO
| Model | Params (M) | BLEU-4 | CIDER | Config | Download |
| :------------------------------------------ | :--------: | :----: | :----: | :----------------------------------------: | :-------------------------------------------------------------------------------------------: |
| `blip2-opt2.7b_3rdparty-zeroshot_caption`\* | 3770.47 | 32.90 | 111.10 | [config](./blip2-opt2.7b_8xb32_caption.py) | [model](https://download.openmmlab.com/mmclassification/v1/blip2/blip2-opt2.7b_3rdparty_pretrain_20230505-b51db4e1.pth) |
### Visual Question Answering on VQAv2
| Model | Params (M) | Accuracy | Config | Download |
| :-------------------------------------- | :--------: | :------: | :------------------------------------: | :-------------------------------------------------------------------------------------------------------: |
| `blip2-opt2.7b_3rdparty-zeroshot_vqa`\* | 3770.47 | 53.50 | [config](./blip2-opt2.7b_8xb16_vqa.py) | [model](https://download.openmmlab.com/mmclassification/v1/blip2/blip2-opt2.7b_3rdparty_pretrain_20230505-b51db4e1.pth) |
### Image-To-Text Retrieval on COCO
| Model | Params (M) | Recall@1 | Config | Download |
| :--------------------------- | :--------: | :------: | :----------------------------------: | :-------------------------------------------------------------------------------------------------------------: |
| `blip2_3rdparty_retrieval`\* | 1173.19 | 85.40 | [config](./blip2_8xb32_retrieval.py) | [model](https://download.openmmlab.com/mmclassification/v1/blip2/blip2_3rdparty_pretrain_20230505-f7ef4390.pth) |
*Models with * are converted from the [official repo](https://github.com/salesforce/LAVIS). The config files of these models are only for inference. We haven't reproduced the training results.*
## Citation
```bibtex
@article{blip2,
title={Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models},
author={Li, Junnan and Li, Dongxu and Savarese, Silvio and Hoi, Steven},
year={2023},
eprint={2301.12597},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
# BLIP-2 (OPT-2.7B) config for visual question answering on GQA.
_base_ = [
    '../_base_/datasets/gqa.py',
    '../_base_/default_runtime.py',
]

# model settings
model = dict(
    type='Blip2VQA',
    tokenizer=dict(
        type='AutoTokenizer', name_or_path='facebook/opt-2.7b',
        use_fast=False),
    vision_backbone=dict(
        type='BEiTViT',
        # eva-g without the final layer
        arch=dict(
            embed_dims=1408,
            num_layers=39,
            num_heads=16,
            feedforward_channels=6144,
        ),
        img_size=364,
        patch_size=14,
        out_indices=-2,  # take features from the penultimate layer
        layer_scale_init_value=0.0,
        use_abs_pos_emb=True,
        use_rel_pos_bias=False,
        frozen_stages=39,  # the whole image encoder stays frozen
        final_norm=False,
        use_shared_rel_pos_bias=False,
        out_type='raw'),
    # Frozen OPT-2.7B language model used as the text decoder.
    text_backbone=dict(
        type='OPTForCausalLM', name_or_path='facebook/opt-2.7b'),
    # Q-Former bridges the frozen image encoder and the frozen LLM.
    multimodal_backbone=dict(
        type='Qformer',
        model_style='bert-base-uncased',
        vision_model_width=1408,
        add_cross_attention=True,
        cross_attention_freq=2,
        num_query_token=32),
    # Maps 768-d query outputs to the 2560-d OPT embedding space.
    vision_neck=dict(
        type='LinearClsHead',
        in_channels=768,
        num_classes=2560,
    ),
    prompt='Question: {} Short Answer:',
    max_txt_len=10)

# data settings
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='RandomResizedCrop', scale=224),
    dict(type='PackInputs', algorithm_keys=['question', 'gt_answer']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='Resize',
        scale=(224, 224),
        interpolation='bicubic',
        backend='pillow'),
    dict(
        type='CleanCaption',
        keys=['question'],
    ),
    dict(type='PackInputs', algorithm_keys=['question', 'gt_answer']),
]

train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader

# schedule settings
optim_wrapper = dict(optimizer=dict(type='AdamW', lr=1e-5, weight_decay=0.05))
param_scheduler = [
    dict(
        type='CosineAnnealingLR',
        by_epoch=True,
        begin=0,
        end=10,
    )
]
train_cfg = dict(max_epochs=10)
val_cfg = dict()
test_cfg = dict()
# BLIP-2 (OPT-2.7B) config for visual question answering on VQAv2.
_base_ = [
    '../_base_/datasets/coco_vqa.py',
    '../_base_/default_runtime.py',
]

# model settings
model = dict(
    type='Blip2VQA',
    tokenizer=dict(
        type='AutoTokenizer', name_or_path='facebook/opt-2.7b',
        use_fast=False),
    vision_backbone=dict(
        type='BEiTViT',
        # eva-g without the final layer
        arch=dict(
            embed_dims=1408,
            num_layers=39,
            num_heads=16,
            feedforward_channels=6144,
        ),
        img_size=364,
        patch_size=14,
        out_indices=-2,  # take features from the penultimate layer
        layer_scale_init_value=0.0,
        use_abs_pos_emb=True,
        use_rel_pos_bias=False,
        frozen_stages=39,  # the whole image encoder stays frozen
        final_norm=False,
        use_shared_rel_pos_bias=False,
        out_type='raw'),
    # Frozen OPT-2.7B language model used as the text decoder.
    text_backbone=dict(
        type='OPTForCausalLM', name_or_path='facebook/opt-2.7b'),
    # Q-Former bridges the frozen image encoder and the frozen LLM.
    multimodal_backbone=dict(
        type='Qformer',
        model_style='bert-base-uncased',
        vision_model_width=1408,
        add_cross_attention=True,
        cross_attention_freq=2,
        num_query_token=32),
    # Maps 768-d query outputs to the 2560-d OPT embedding space.
    vision_neck=dict(
        type='LinearClsHead',
        in_channels=768,
        num_classes=2560,
    ),
    prompt='Question: {} Answer:',
    max_txt_len=10)

# data settings
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='RandomResizedCrop', scale=224),
    dict(
        type='PackInputs',
        algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
        meta_keys=['question_id', 'image_id'],
    ),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='Resize',
        scale=(224, 224),
        interpolation='bicubic',
        backend='pillow'),
    dict(
        type='CleanCaption',
        keys=['question'],
    ),
    dict(
        type='PackInputs',
        algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
        meta_keys=['question_id', 'image_id'],
    ),
]

train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader

# schedule settings
optim_wrapper = dict(optimizer=dict(type='AdamW', lr=1e-5, weight_decay=0.05))
param_scheduler = [
    dict(
        type='CosineAnnealingLR',
        by_epoch=True,
        begin=0,
        end=10,
    )
]
train_cfg = dict(max_epochs=10)
val_cfg = dict()
test_cfg = dict()
# BLIP-2 (OPT-2.7B) config for image captioning on COCO.
_base_ = [
    '../_base_/datasets/coco_caption.py',
    '../_base_/default_runtime.py',
]

# model settings
model = dict(
    type='Blip2Caption',
    tokenizer=dict(
        type='AutoTokenizer', name_or_path='facebook/opt-2.7b',
        use_fast=False),
    vision_backbone=dict(
        type='BEiTViT',
        # eva-g without the final layer
        arch=dict(
            embed_dims=1408,
            num_layers=39,
            num_heads=16,
            feedforward_channels=6144,
        ),
        img_size=364,
        patch_size=14,
        out_indices=-2,  # take features from the penultimate layer
        layer_scale_init_value=0.0,
        use_abs_pos_emb=True,
        use_rel_pos_bias=False,
        frozen_stages=39,  # the whole image encoder stays frozen
        final_norm=False,
        use_shared_rel_pos_bias=False,
        out_type='raw'),
    # Frozen OPT-2.7B language model used as the text decoder.
    text_backbone=dict(
        type='OPTForCausalLM', name_or_path='facebook/opt-2.7b'),
    # Q-Former bridges the frozen image encoder and the frozen LLM.
    multimodal_backbone=dict(
        type='Qformer',
        model_style='bert-base-uncased',
        vision_model_width=1408,
        add_cross_attention=True,
        cross_attention_freq=2,
        num_query_token=32),
    # Maps 768-d query outputs to the 2560-d OPT embedding space.
    vision_neck=dict(
        type='LinearClsHead',
        in_channels=768,
        num_classes=2560,
    ),
    prompt='a photo of',  # caption generation prefix
    max_txt_len=30)

# schedule settings
optim_wrapper = dict(optimizer=dict(type='AdamW', lr=1e-5, weight_decay=0.05))
param_scheduler = [
    dict(
        type='CosineAnnealingLR',
        by_epoch=True,
        begin=0,
        end=10,
    )
]
train_cfg = dict(by_epoch=True, max_epochs=10)
val_cfg = dict()
test_cfg = dict()

# dataset settings
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='Resize',
        scale=(364, 364),
        interpolation='bicubic',
        backend='pillow'),
    dict(type='PackInputs', meta_keys=['image_id']),
]

val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# BLIP-2 config for image-text retrieval on COCO.
_base_ = [
    '../_base_/datasets/coco_retrieval.py',
    '../_base_/default_runtime.py',
]

# model settings
model = dict(
    type='Blip2Retrieval',
    tokenizer=dict(type='Blip2Tokenizer', name_or_path='bert-base-uncased'),
    vision_backbone=dict(
        type='BEiTViT',
        # eva-g without the final layer
        arch=dict(
            embed_dims=1408,
            num_layers=39,
            num_heads=16,
            feedforward_channels=6144,
        ),
        img_size=364,
        patch_size=14,
        layer_scale_init_value=0.0,
        use_abs_pos_emb=True,
        use_rel_pos_bias=False,
        final_norm=False,
        use_shared_rel_pos_bias=False,
        out_type='raw'),
    # Q-Former produces query embeddings used for both ITC and ITM.
    multimodal_backbone=dict(
        type='Qformer',
        model_style='bert-base-uncased',
        vision_model_width=1408,
        add_cross_attention=True,
        cross_attention_freq=2,
        num_query_token=32),
    # Project both modalities into a shared 256-d embedding space.
    vision_neck=dict(
        type='LinearClsHead',
        in_channels=768,
        num_classes=256,
    ),
    text_neck=dict(
        type='LinearClsHead',
        in_channels=768,
        num_classes=256,
    ),
    # Image-text matching head re-ranks the contrastive candidates.
    multimodal_head=dict(
        type='ITMHead',
        hidden_size=768,
        with_pooler=False,
    ),
    topk=128,  # number of ITC candidates re-ranked by the ITM head
    max_txt_len=35,
)

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='Resize',
        scale=(364, 364),
        interpolation='bicubic',
        backend='pillow'),
    dict(type='CleanCaption', keys='text'),
    dict(
        type='PackInputs',
        algorithm_keys=['text', 'gt_text_id', 'gt_image_id'],
        meta_keys=['image_id']),
]

val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader

# optimizer
optimizer = dict(type='AdamW', lr=2e-5, weight_decay=0.04)
optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer)

# learning rate scheduler
param_scheduler = [dict(type='CosineAnnealingLR', by_epoch=True)]

# runtime settings
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=6)
val_cfg = dict(type='RetrievalValLoop')
test_cfg = dict(type='RetrievalTestLoop')
randomness = dict(seed=42)
Collections:
- Name: BLIP-2
Metadata:
Training Data:
- COCO
- VG
- CC3M
- CC12M
- SBU
- LAION-400M
Training Resources: 8x A100 GPUs
Architecture:
- Transformer
- Q-Former
Paper:
Title: 'BLIP-2: Bootstrapping Language-Image Pre-training with Frozen Image
Encoders and Large Language Models'
URL: https://arxiv.org/abs/2301.12597
README: configs/blip2/README.md
Models:
- Name: blip2_3rdparty_retrieval
Metadata:
FLOPs: null
Parameters: 1173191358
In Collection: BLIP-2
Results:
- Task: Image-To-Text Retrieval
Dataset: COCO
Metrics:
Recall@1: 85.4
- Task: Text-To-Image Retrieval
Dataset: COCO
Metrics:
Recall@1: 68.3
Weights: https://download.openmmlab.com/mmclassification/v1/blip2/blip2_3rdparty_pretrain_20230505-f7ef4390.pth
Config: configs/blip2/blip2_8xb32_retrieval.py
Converted From:
Weights: https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/blip2_pretrained_opt2.7b.pth
Code: https://github.com/salesforce/LAVIS
- Name: blip2-opt2.7b_3rdparty-zeroshot_vqa
Metadata:
FLOPs: null
Parameters: 3770465152
In Collection: BLIP-2
Results:
- Task: Visual Question Answering
Dataset: VQAv2
Metrics:
Accuracy: 53.5
Weights: https://download.openmmlab.com/mmclassification/v1/blip2/blip2-opt2.7b_3rdparty_pretrain_20230505-b51db4e1.pth
Config: configs/blip2/blip2-opt2.7b_8xb16_vqa.py
Converted From:
Weights: https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/blip2_pretrained_opt2.7b.pth
Code: https://github.com/salesforce/LAVIS
- Name: blip2-opt2.7b_3rdparty-zeroshot_caption
Metadata:
FLOPs: null
Parameters: 3770465152
In Collection: BLIP-2
Results:
- Task: Image Caption
Dataset: COCO
Metrics:
BLEU-4: 32.90
CIDER: 111.10
Weights: https://download.openmmlab.com/mmclassification/v1/blip2/blip2-opt2.7b_3rdparty_pretrain_20230505-b51db4e1.pth
Config: configs/blip2/blip2-opt2.7b_8xb32_caption.py
Converted From:
Weights: https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/blip2_pretrained_opt2.7b.pth
Code: https://github.com/salesforce/LAVIS
# BYOL
> [Bootstrap your own latent: A new approach to self-supervised Learning](https://arxiv.org/abs/2006.07733)
<!-- [ALGORITHM] -->
## Abstract
**B**ootstrap **Y**our **O**wn **L**atent (BYOL) is a new approach to self-supervised image representation learning. BYOL relies on two neural networks, referred to as online and target networks, that interact and learn from each other. From an augmented view of an image, we train the online network to predict the target network representation of the same image under a different augmented view. At the same time, we update the target network with a slow-moving average of the online network.
<div align=center>
<img src="https://user-images.githubusercontent.com/36138628/149720208-5ffbee78-1437-44c7-9ddb-b8caab60d2c3.png" width="800" />
</div>
## How to use it?
<!-- [TABS-BEGIN] -->
**Predict image**
```python
from mmpretrain import inference_model
predict = inference_model('resnet50_byol-pre_8xb512-linear-coslr-90e_in1k', 'demo/bird.JPEG')
print(predict['pred_class'])
print(predict['pred_score'])
```
**Use the model**
```python
import torch
from mmpretrain import get_model
model = get_model('byol_resnet50_16xb256-coslr-200e_in1k', pretrained=True)
inputs = torch.rand(1, 3, 224, 224)
out = model(inputs)
print(type(out))
# To extract features.
feats = model.extract_feat(inputs)
print(type(feats))
```
**Train/Test Command**
Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset).
Train:
```shell
python tools/train.py configs/byol/byol_resnet50_16xb256-coslr-200e_in1k.py
```
Test:
```shell
python tools/test.py configs/byol/benchmarks/resnet50_8xb512-linear-coslr-90e_in1k.py https://download.openmmlab.com/mmselfsup/1.x/byol/byol_resnet50_16xb256-coslr-200e_in1k/resnet50_linear-8xb512-coslr-90e_in1k/resnet50_linear-8xb512-coslr-90e_in1k_20220825-7596c6f5.pth
```
<!-- [TABS-END] -->
## Models and results
### Pretrained models
| Model | Params (M) | Flops (G) | Config | Download |
| :-------------------------------------- | :--------: | :-------: | :------------------------------------------------: | :------------------------------------------------------------------------------------------: |
| `byol_resnet50_16xb256-coslr-200e_in1k` | 68.02 | 4.11 | [config](byol_resnet50_16xb256-coslr-200e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/byol/byol_resnet50_16xb256-coslr-200e_in1k/byol_resnet50_16xb256-coslr-200e_in1k_20220825-de817331.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/byol/byol_resnet50_16xb256-coslr-200e_in1k/byol_resnet50_16xb256-coslr-200e_in1k_20220825-de817331.json) |
### Image Classification on ImageNet-1k
| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Config | Download |
| :---------------------------------------- | :------------------------------------------: | :--------: | :-------: | :-------: | :----------------------------------------: | :-------------------------------------------: |
| `resnet50_byol-pre_8xb512-linear-coslr-90e_in1k` | [BYOL](https://download.openmmlab.com/mmselfsup/1.x/byol/byol_resnet50_16xb256-coslr-200e_in1k/byol_resnet50_16xb256-coslr-200e_in1k_20220825-de817331.pth) | 25.56 | 4.11 | 71.80 | [config](benchmarks/resnet50_8xb512-linear-coslr-90e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/byol/byol_resnet50_16xb256-coslr-200e_in1k/resnet50_linear-8xb512-coslr-90e_in1k/resnet50_linear-8xb512-coslr-90e_in1k_20220825-7596c6f5.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/byol/byol_resnet50_16xb256-coslr-200e_in1k/resnet50_linear-8xb512-coslr-90e_in1k/resnet50_linear-8xb512-coslr-90e_in1k_20220825-7596c6f5.json) |
## Citation
```bibtex
@inproceedings{grill2020bootstrap,
title={Bootstrap your own latent: A new approach to self-supervised learning},
author={Grill, Jean-Bastien and Strub, Florian and Altch{\'e}, Florent and Tallec, Corentin and Richemond, Pierre H and Buchatskaya, Elena and Doersch, Carl and Pires, Bernardo Avila and Guo, Zhaohan Daniel and Azar, Mohammad Gheshlaghi and others},
booktitle={NeurIPS},
year={2020}
}
```
# Mask R-CNN R50-C4 1x COCO benchmark config for evaluating self-supervised
# pretrained backbones (mmdetection cross-repo base).
_base_ = 'mmdet::mask_rcnn/mask-rcnn_r50-caffe-c4_1x_coco.py'
# https://github.com/open-mmlab/mmdetection/blob/dev-3.x/configs/mask_rcnn/mask-rcnn_r50-caffe-c4_1x_coco.py

# Torchvision-style (RGB) normalization replaces the caffe-style defaults of
# the base config, matching the torchvision pretrained checkpoint below.
data_preprocessor = dict(
    type='DetDataPreprocessor',
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    bgr_to_rgb=True,
    pad_mask=True,
    pad_size_divisor=32)

norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    data_preprocessor=data_preprocessor,
    backbone=dict(
        frozen_stages=-1,  # fine-tune all backbone stages
        norm_cfg=norm_cfg,
        norm_eval=False,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    roi_head=dict(
        shared_head=dict(
            type='ResLayerExtraNorm',
            norm_cfg=norm_cfg,
            norm_eval=False,
            style='pytorch')))

# Multi-scale training pipeline.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='RandomChoiceResize',
        scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
                (1333, 768), (1333, 800)],
        keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))

train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)

# ResLayerExtraNorm lives in mmpretrain, not mmdetection, so it must be
# imported explicitly for the registry to find it.
custom_imports = dict(
    imports=['mmpretrain.models.utils.res_layer_extra_norm'],
    allow_failed_imports=False)
# Mask R-CNN R50-FPN 1x COCO benchmark config for evaluating self-supervised
# pretrained backbones (mmdetection cross-repo base).
_base_ = 'mmdet::mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
# https://github.com/open-mmlab/mmdetection/blob/dev-3.x/configs/mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py

# SyncBN everywhere (backbone, neck and heads) for distributed fine-tuning.
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    backbone=dict(frozen_stages=-1, norm_cfg=norm_cfg, norm_eval=False),
    neck=dict(norm_cfg=norm_cfg),
    roi_head=dict(
        bbox_head=dict(type='Shared4Conv1FCBBoxHead', norm_cfg=norm_cfg),
        mask_head=dict(norm_cfg=norm_cfg)))

# Multi-scale training pipeline.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='RandomChoiceResize',
        scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
                (1333, 768), (1333, 800)],
        keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# Linear-probe evaluation config: ResNet-50 with a frozen backbone,
# training only the classification head on ImageNet.
_base_ = [
    '../../_base_/models/resnet50.py',
    '../../_base_/datasets/imagenet_bs32_pil_resize.py',
    '../../_base_/schedules/imagenet_lars_coslr_90e.py',
    '../../_base_/default_runtime.py',
]

model = dict(
    backbone=dict(
        frozen_stages=4,  # freeze all four ResNet stages
        # NOTE(review): checkpoint is an empty placeholder — presumably filled
        # in via --cfg-options with the self-supervised checkpoint; confirm.
        init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')))

# dataset summary
train_dataloader = dict(batch_size=512)

# runtime settings
default_hooks = dict(
    checkpoint=dict(type='CheckpointHook', interval=10, max_keep_ckpts=3))
# BYOL self-supervised pre-training config: ResNet-50, LARS, cosine schedule.
_base_ = [
    '../_base_/datasets/imagenet_bs32_byol.py',
    '../_base_/schedules/imagenet_lars_coslr_200e.py',
    '../_base_/default_runtime.py',
]

train_dataloader = dict(batch_size=256)

# model settings
model = dict(
    type='BYOL',
    base_momentum=0.01,  # EMA coefficient for the target-network update
    backbone=dict(
        type='ResNet',
        depth=50,
        norm_cfg=dict(type='SyncBN'),
        zero_init_residual=False),
    # Online projector: 2048 -> 4096 -> 256 MLP with global average pooling.
    neck=dict(
        type='NonLinearNeck',
        in_channels=2048,
        hid_channels=4096,
        out_channels=256,
        num_layers=2,
        with_bias=True,
        with_last_bn=False,
        with_avg_pool=True),
    # Predictor on top of the online projector; target branch has none.
    head=dict(
        type='LatentPredictHead',
        predictor=dict(
            type='NonLinearNeck',
            in_channels=256,
            hid_channels=4096,
            out_channels=256,
            num_layers=2,
            with_bias=True,
            with_last_bn=False,
            with_avg_pool=False),
        loss=dict(type='CosineSimilarityLoss')),
)

# optimizer
optimizer = dict(type='LARS', lr=4.8, momentum=0.9, weight_decay=1e-6)
optim_wrapper = dict(
    type='OptimWrapper',
    optimizer=optimizer,
    # Exclude normalization and bias parameters from weight decay and
    # from LARS adaptation, as in the original BYOL recipe.
    paramwise_cfg=dict(
        custom_keys={
            'bn': dict(decay_mult=0, lars_exclude=True),
            'bias': dict(decay_mult=0, lars_exclude=True),
            # bn layer in ResNet block downsample module
            'downsample.1': dict(decay_mult=0, lars_exclude=True),
        }),
)

# runtime settings
default_hooks = dict(checkpoint=dict(max_keep_ckpts=3))

# NOTE: `auto_scale_lr` is for automatically scaling LR
# based on the actual training batch size.
auto_scale_lr = dict(base_batch_size=4096)
Collections:
- Name: BYOL
Metadata:
Training Data: ImageNet-1k
Training Techniques:
- LARS
Training Resources: 8x V100 GPUs (b256), 16x A100-80G GPUs (b4096)
Architecture:
- ResNet
- BYOL
Paper:
Title: 'Bootstrap your own latent: A new approach to self-supervised Learning'
URL: https://arxiv.org/abs/2006.07733
README: configs/byol/README.md
Models:
- Name: byol_resnet50_16xb256-coslr-200e_in1k
Metadata:
Epochs: 200
Batch Size: 4096
FLOPs: 4109364224
Parameters: 68024448
Training Data: ImageNet-1k
In Collection: BYOL
Results: null
Weights: https://download.openmmlab.com/mmselfsup/1.x/byol/byol_resnet50_16xb256-coslr-200e_in1k/byol_resnet50_16xb256-coslr-200e_in1k_20220825-de817331.pth
Config: configs/byol/byol_resnet50_16xb256-coslr-200e_in1k.py
Downstream:
- resnet50_byol-pre_8xb512-linear-coslr-90e_in1k
- Name: resnet50_byol-pre_8xb512-linear-coslr-90e_in1k
Metadata:
Epochs: 90
Batch Size: 4096
FLOPs: 4109464576
Parameters: 25557032
Training Data: ImageNet-1k
In Collection: BYOL
Results:
- Task: Image Classification
Dataset: ImageNet-1k
Metrics:
Top 1 Accuracy: 71.8
Weights: https://download.openmmlab.com/mmselfsup/1.x/byol/byol_resnet50_16xb256-coslr-200e_in1k/resnet50_linear-8xb512-coslr-90e_in1k/resnet50_linear-8xb512-coslr-90e_in1k_20220825-7596c6f5.pth
Config: configs/byol/benchmarks/resnet50_8xb512-linear-coslr-90e_in1k.py
# CAE
> [Context Autoencoder for Self-Supervised Representation Learning](https://arxiv.org/abs/2202.03026)
<!-- [ALGORITHM] -->
## Abstract
We present a novel masked image modeling (MIM) approach, context autoencoder (CAE), for self-supervised learning. We randomly partition the image into two sets: visible patches and masked patches. The CAE architecture consists of: (i) an encoder that takes visible patches as input and outputs their latent representations, (ii) a latent context regressor that predicts the masked patch representations from the visible patch representations that are not updated in this regressor, (iii) a decoder that takes the estimated masked patch representations as input and makes predictions for the masked patches, and (iv) an alignment module that aligns the masked patch representation estimation with the masked patch representations computed from the encoder. In comparison to previous MIM methods that couple the encoding and decoding roles, e.g., using a single module in BEiT, our approach attempts to separate the encoding role (content understanding) from the decoding role (making predictions for masked patches) using different modules, improving the content understanding capability. In addition, our approach makes predictions from the visible patches to the masked patches in the latent representation space that is expected to take on semantics. In addition, we present the explanations about why contrastive pretraining and supervised pretraining perform similarly and why MIM potentially performs better. We demonstrate the effectiveness of our CAE through superior transfer performance in downstream tasks: semantic segmentation, and object detection and instance segmentation.
<div align=center>
<img src="https://user-images.githubusercontent.com/30762564/165459947-6c6ef13c-0593-4765-b44e-6da0a079802a.png" width="70%"/>
</div>
## How to use it?
<!-- [TABS-BEGIN] -->
**Predict image**
```python
from mmpretrain import inference_model
predict = inference_model('beit-base-p16_cae-pre_8xb128-coslr-100e_in1k', 'demo/bird.JPEG')
print(predict['pred_class'])
print(predict['pred_score'])
```
**Use the model**
```python
import torch
from mmpretrain import get_model
model = get_model('cae_beit-base-p16_8xb256-amp-coslr-300e_in1k', pretrained=True)
inputs = torch.rand(1, 3, 224, 224)
out = model(inputs)
print(type(out))
# To extract features.
feats = model.extract_feat(inputs)
print(type(feats))
```
**Train/Test Command**
Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset).
Train:
```shell
python tools/train.py configs/cae/cae_beit-base-p16_8xb256-amp-coslr-300e_in1k.py
```
Test:
```shell
python tools/test.py configs/cae/benchmarks/beit-base-p16_8xb128-coslr-100e_in1k.py https://download.openmmlab.com/mmselfsup/1.x/cae/cae_vit-base-p16_16xb128-fp16-coslr-300e_in1k/vit-base-p16_ft-8xb128-coslr-100e-rpe_in1k/vit-base-p16_ft-8xb128-coslr-100e-rpe_in1k_20220825-f3d234cd.pth
```
<!-- [TABS-END] -->
## Models and results
### Pretrained models
| Model | Params (M) | Flops (G) | Config | Download |
| :--------------------------------------------- | :--------: | :-------: | :-------------------------------------------------------: | :----------------------------------------------------------------------------: |
| `cae_beit-base-p16_8xb256-amp-coslr-300e_in1k` | 288.43 | 17.58 | [config](cae_beit-base-p16_8xb256-amp-coslr-300e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/cae/cae_vit-base-p16_8xb256-amp-coslr-300e_in1k/cae_vit-base-p16_8xb256-amp-coslr-300e_in1k_20221230-808170f3.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/cae/cae_vit-base-p16_8xb256-amp-coslr-300e_in1k/cae_vit-base-p16_8xb256-amp-coslr-300e_in1k_20221230-808170f3.json) |
### Image Classification on ImageNet-1k
| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Config | Download |
| :---------------------------------------- | :------------------------------------------: | :--------: | :-------: | :-------: | :----------------------------------------: | :-------------------------------------------: |
| `beit-base-p16_cae-pre_8xb128-coslr-100e_in1k` | [CAE](https://download.openmmlab.com/mmselfsup/1.x/cae/cae_vit-base-p16_8xb256-amp-coslr-300e_in1k/cae_vit-base-p16_8xb256-amp-coslr-300e_in1k_20221230-808170f3.pth) | 86.68 | 17.58 | 83.20 | [config](benchmarks/beit-base-p16_8xb128-coslr-100e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/cae/cae_vit-base-p16_16xb128-fp16-coslr-300e_in1k/vit-base-p16_ft-8xb128-coslr-100e-rpe_in1k/vit-base-p16_ft-8xb128-coslr-100e-rpe_in1k_20220825-f3d234cd.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/cae/cae_vit-base-p16_16xb128-fp16-coslr-300e_in1k/vit-base-p16_ft-8xb128-coslr-100e-rpe_in1k/vit-base-p16_ft-8xb128-coslr-100e-rpe_in1k_20220825-f3d234cd.json) |
## Citation
```bibtex
@article{CAE,
title={Context Autoencoder for Self-Supervised Representation Learning},
  author={Xiaokang Chen and Mingyu Ding and Xiaodi Wang and Ying Xin and Shentong Mo and
          Yunhao Wang and Shumin Han and Ping Luo and Gang Zeng and Jingdong Wang},
journal={ArXiv},
year={2022}
}
```
# Inherit the Swin-style ImageNet dataset, AdamW schedule and default
# runtime; the settings below override the inherited values.
_base_ = [
    '../../_base_/datasets/imagenet_bs64_swin_224.py',
    '../../_base_/schedules/imagenet_bs1024_adamw_swin.py',
    '../../_base_/default_runtime.py'
]

# CAE fine-tuning setting
# dataset: normalize RGB pixels from [0, 255] to roughly [-1, 1]
data_preprocessor = dict(
    num_classes=1000,
    mean=[127.5] * 3,
    std=[127.5] * 3,
    # decoded images arrive as BGR; convert to RGB before normalization
    to_rgb=True,
)
# Some augmentations below (padding / erasing fill) operate on the raw BGR
# image, so expose the normalization constants in BGR channel order too.
bgr_mean = list(reversed(data_preprocessor['mean']))
bgr_std = list(reversed(data_preprocessor['std']))
# Training pipeline: BEiT-style fine-tuning augmentation
# (random resized crop + flip + RandAugment + random erasing).
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='RandomResizedCrop',
        scale=224,
        backend='pillow',
        interpolation='bicubic'),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(
        type='RandAugment',
        policies='timm_increasing',  # timm's "increasing" op set
        num_policies=2,  # apply 2 randomly chosen ops per image
        total_level=10,
        magnitude_level=9,
        magnitude_std=0.5,  # sample magnitude around level 9
        hparams=dict(
            # pad with the dataset mean (BGR order, rounded to int pixels)
            pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')),
    dict(
        type='RandomErasing',
        erase_prob=0.25,
        mode='rand',  # 'rand': fill the erased region with random values
        min_area_ratio=0.02,
        max_area_ratio=1 / 3,
        fill_color=bgr_mean,
        fill_std=bgr_std),
    dict(type='PackInputs'),
]
# Test pipeline: resize the short edge to 256, then center-crop to 224.
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='ResizeEdge',
        scale=256,
        edge='short',
        backend='pillow',
        interpolation='bicubic'),
    dict(type='CenterCrop', crop_size=224),
    dict(type='PackInputs'),
]
# Swap in the pipelines above; per-GPU batch size 128 (config name: 8xb128).
train_dataloader = dict(dataset=dict(pipeline=train_pipeline), batch_size=128)
val_dataloader = dict(dataset=dict(pipeline=test_pipeline), batch_size=128)
# model settings
model = dict(
    type='ImageClassifier',
    backbone=dict(
        type='BEiTViT',
        arch='base',
        img_size=224,
        patch_size=16,
        final_norm=False,  # do not use final norm
        drop_path_rate=0.1,
        layer_scale_init_value=0.1,
        out_type='avg_featmap',  # classify on the averaged patch tokens
        use_abs_pos_emb=True,
        use_rel_pos_bias=True,
        use_shared_rel_pos_bias=False,
        # NOTE: `checkpoint` is deliberately empty; point it at the CAE
        # pre-trained weights (e.g. via --cfg-options) before training.
        init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')),
    neck=None,
    head=dict(
        type='LinearClsHead',
        num_classes=1000,
        in_channels=768,  # ViT-base embedding dimension
        loss=dict(
            type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
        # very small init std so the fresh head starts near zero
        init_cfg=dict(type='TruncNormal', layer='Linear', std=2e-5)),
    # batch-level mixing augmentations applied during training
    train_cfg=dict(augments=[
        dict(type='Mixup', alpha=0.8),
        dict(type='CutMix', alpha=1.0)
    ]))
# optimizer wrapper: AdamW with layer-wise learning-rate decay
optim_wrapper = dict(
    optimizer=dict(
        type='AdamW', lr=8e-3, betas=(0.9, 0.999), weight_decay=0.05),
    # scales each layer's LR by layer_decay_rate^depth (deeper layers,
    # closer to the input, get smaller LRs)
    constructor='LearningRateDecayOptimWrapperConstructor',
    paramwise_cfg=dict(
        layer_decay_rate=0.65,
        custom_keys={
            # no weight decay for norm layers, biases and the learned tokens
            '.ln': dict(decay_mult=0.0),
            '.bias': dict(decay_mult=0.0),
            '.cls_token': dict(decay_mult=0.0),
            '.pos_embed': dict(decay_mult=0.0)
        }))
# learning rate scheduler: 5-epoch linear warmup, then cosine decay to 1e-6
param_scheduler = [
    dict(
        type='LinearLR',
        start_factor=1e-4,
        by_epoch=True,
        begin=0,
        end=5,
        convert_to_iter_based=True),  # step per iteration, not per epoch
    dict(
        type='CosineAnnealingLR',
        T_max=95,  # covers epochs 5..100
        by_epoch=True,
        begin=5,
        end=100,
        eta_min=1e-6,
        convert_to_iter_based=True)
]
default_hooks = dict(
    # save checkpoint per epoch, keeping only the latest 3 on disk.
    checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3))
train_cfg = dict(by_epoch=True, max_epochs=100)
randomness = dict(seed=0)
# Only the runtime defaults are inherited; the dataset is defined inline.
_base_ = '../_base_/default_runtime.py'

# dataset settings
dataset_type = 'ImageNet'
data_root = 'data/imagenet/'

# Two normalizations per sample: the first (ImageNet mean/std) feeds the
# ViT encoder view; the second maps raw pixels x in [0, 255] to
# (x + 31.875) / 318.75, i.e. the [0.1, 0.9] range used by the DALL-E
# target generator configured below.
data_preprocessor = dict(
    type='TwoNormDataPreprocessor',
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    second_mean=[-31.875] * 3,
    second_std=[318.75] * 3,
    to_rgb=True)
# Pre-training pipeline: each image yields two crops of the same region —
# a 224px view for the encoder (224 / patch 16 = 14x14 patches) and a
# 112px view for the target generator (presumably sized so the DALL-E
# tokenizer also emits a 14x14 grid — confirm against its downsample rate).
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='RandomFlip', prob=0.5),
    dict(
        type='RandomResizedCropAndInterpolationWithTwoPic',
        size=224,  # view fed to the ViT encoder
        second_size=112,  # view fed to the DALL-E target generator
        interpolation='bicubic',
        second_interpolation='lanczos',
        scale=(0.08, 1.0)),
    dict(
        # BEiT-style block-wise masking: hide 75 of the 14x14 = 196 patch
        # positions, in blocks of at least 16 patches.
        type='BEiTMaskGenerator',
        input_size=(14, 14),
        num_masking_patches=75,
        max_num_patches=None,
        min_num_patches=16),
    dict(type='PackInputs')
]
train_dataloader = dict(
    batch_size=256,  # per GPU; auto_scale_lr below assumes a total of 2048
    num_workers=8,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    collate_fn=dict(type='default_collate'),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='meta/train.txt',
        data_prefix=dict(img_path='train/'),
        pipeline=train_pipeline))
# model settings
model = dict(
    type='CAE',  # Context Autoencoder (masked image modeling)
    backbone=dict(
        type='CAEPretrainViT',  # ViT-base encoder over the visible patches
        arch='b',
        patch_size=16,
        layer_scale_init_value=0.1,
        bias='qv_bias'),  # bias only on the q/v projections
    neck=dict(
        # latent-context regressor (predicts masked-patch latents from
        # visible ones) followed by the pixel/token decoder
        type='CAENeck',
        embed_dims=768,
        num_heads=12,
        regressor_depth=4,
        decoder_depth=4,
        mlp_ratio=4,
        layer_scale_init_value=0.1,
    ),
    # lambd weights one CAE loss term against the other — confirm its exact
    # meaning (alignment vs. prediction) against the CAELoss implementation.
    head=dict(type='CAEHead', loss=dict(type='CAELoss', lambd=2)),
    # frozen DALL-E encoder downloaded at init; provides the discrete
    # token targets for the masked patches.
    target_generator=dict(
        type='DALL-E',
        init_cfg=dict(
            type='Pretrained',
            checkpoint= # noqa: E251
            'https://download.openmmlab.com/mmselfsup/1.x/target_generator_ckpt/dalle_encoder.pth', # noqa: E501
        )),
    # NOTE(review): presumably the EMA momentum for the teacher/alignment
    # branch; 0.0 would keep it identical to the student — verify in CAE.
    base_momentum=0.0)
# optimizer wrapper: AdamW under automatic mixed precision (AMP)
optim_wrapper = dict(
    type='AmpOptimWrapper',
    loss_scale='dynamic',  # dynamic loss scaling for fp16 stability
    optimizer=dict(
        type='AdamW', lr=1.5e-3, betas=(0.9, 0.999), weight_decay=0.05),
    clip_grad=dict(max_norm=3.0),  # clip gradients by global norm
    paramwise_cfg=dict(
        # no weight decay on biases, norm layers, or other flat (1-D) params
        bias_decay_mult=0.0, norm_decay_mult=0.0, flat_decay_mult=0.0))
# learning rate scheduler: 10-epoch linear warmup, then cosine decay to 1e-5
param_scheduler = [
    dict(
        type='LinearLR',
        start_factor=1e-4,
        by_epoch=True,
        begin=0,
        end=10,
        convert_to_iter_based=True),  # step per iteration, not per epoch
    dict(
        type='CosineAnnealingLR',
        T_max=290,  # covers epochs 10..300
        eta_min=1e-5,
        by_epoch=True,
        begin=10,
        end=300,
        convert_to_iter_based=True)
]
# runtime settings
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=300)
default_hooks = dict(
    # only keeps the latest 3 checkpoints
    checkpoint=dict(type='CheckpointHook', interval=10, max_keep_ckpts=3))
# distinct seed per rank so augmentations decorrelate across GPUs
randomness = dict(seed=0, diff_rank_seed=True)
# NOTE(review): presumably required because some parameters (e.g. the frozen
# target generator) receive no gradients under DDP — confirm.
find_unused_parameters = True
# NOTE: `auto_scale_lr` is for automatically scaling LR
# based on the actual training batch size (reference: 256 x 8 GPUs = 2048).
auto_scale_lr = dict(base_batch_size=2048)
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment