"lib/llm/src/http/service.rs" did not exist on "d0d35a9ee45ae4c5d8aec6d788f7594ec4a28ba6"
Commit 495d9ed9 authored by limm's avatar limm
Browse files

add part code

parent 59b09903
Pipeline #2799 canceled with stages
_base_ = [
'../_base_/datasets/voc_bs16.py',
'../_base_/default_runtime.py',
]
# model settings
# load model pretrained on imagenet
pretrained = 'https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_batch256_imagenet_20210208-db26f1a5.pth' # noqa
# use different head for multilabel task
model = dict(
type='ImageClassifier',
backbone=dict(
type='VGG',
depth=16,
num_classes=20,
init_cfg=dict(
type='Pretrained', checkpoint=pretrained, prefix='backbone')),
neck=None,
head=dict(
type='MultiLabelClsHead',
loss=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)))
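# With `use_sigmoid=True` each of the 20 VOC classes gets an independent
# binary cross-entropy term (a sketch, assuming standard BCE-with-logits):
#   loss = mean_c [ BCE(sigmoid(logit_c), target_c) ]
# rather than a single softmax over mutually exclusive classes, which is what
# makes the head suitable for multilabel classification.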
# schedule settings
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0),
# update the final linear by 10 times learning rate.
paramwise_cfg=dict(custom_keys={'.backbone.classifier': dict(lr_mult=10)}),
)
# learning policy
param_scheduler = dict(type='StepLR', by_epoch=True, step_size=20, gamma=0.1)
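# i.e. the base lr of 1e-3 is multiplied by gamma=0.1 every 20 epochs:
# 1e-3 for the first 20 epochs, then 1e-4 for the remaining 20 (max_epochs=40 below).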
# train, val, test setting
train_cfg = dict(by_epoch=True, max_epochs=40, val_interval=1)
val_cfg = dict()
test_cfg = dict()
# NOTE: `auto_scale_lr` is for automatically scaling LR
# based on the actual training batch size.
# base_batch_size = (8 GPUs) x (16 samples per GPU)
auto_scale_lr = dict(base_batch_size=128)
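# A sketch of the linear scaling rule applied when auto scaling is enabled
# (e.g. via the train script's `--auto-scale-lr` option):
#   scaled_lr = base_lr * actual_batch_size / base_batch_size
# e.g. training on 4 GPUs x 16 samples = 64 would give 0.001 * 64 / 128 = 5e-4.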
_base_ = [
'../_base_/models/vgg16.py',
'../_base_/datasets/imagenet_bs32_pil_resize.py',
'../_base_/schedules/imagenet_bs256.py',
'../_base_/default_runtime.py',
]
# schedule settings
optim_wrapper = dict(optimizer=dict(lr=0.01))
_base_ = [
'../_base_/models/vgg16bn.py',
'../_base_/datasets/imagenet_bs32_pil_resize.py',
'../_base_/schedules/imagenet_bs256.py',
'../_base_/default_runtime.py',
]
_base_ = [
'../_base_/models/vgg19.py',
'../_base_/datasets/imagenet_bs32_pil_resize.py',
'../_base_/schedules/imagenet_bs256.py',
'../_base_/default_runtime.py',
]
# schedule settings
optim_wrapper = dict(optimizer=dict(lr=0.01))
_base_ = [
'../_base_/models/vgg19bn.py',
'../_base_/datasets/imagenet_bs32_pil_resize.py',
'../_base_/schedules/imagenet_bs256.py',
'../_base_/default_runtime.py',
]
# VIG
> [Vision GNN: An Image is Worth Graph of Nodes](https://arxiv.org/abs/2206.00272)
<!-- [ALGORITHM] -->
## Abstract
Network architecture plays a key role in the deep learning-based computer vision system. The widely-used convolutional neural network and transformer treat the image as a grid or sequence structure, which is not flexible to capture irregular and complex objects. In this paper, we propose to represent the image as a graph structure and introduce a new Vision GNN (ViG) architecture to extract graph-level feature for visual tasks. We first split the image to a number of patches which are viewed as nodes, and construct a graph by connecting the nearest neighbors. Based on the graph representation of images, we build our ViG model to transform and exchange information among all the nodes. ViG consists of two basic modules: Grapher module with graph convolution for aggregating and updating graph information, and FFN module with two linear layers for node feature transformation. Both isotropic and pyramid architectures of ViG are built with different model sizes. Extensive experiments on image recognition and object detection tasks demonstrate the superiority of our ViG architecture. We hope this pioneering study of GNN on general visual tasks will provide useful inspiration and experience for future research.
<div align=center>
<img src="https://user-images.githubusercontent.com/26739999/212789461-f085e4da-9ce9-435f-93c0-e1b84d10b79f.png" width="50%"/>
</div>
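As a rough illustration of the graph construction described in the abstract, here is a minimal, self-contained PyTorch sketch (the function name and sizes are made up for illustration; this is not mmpretrain's or the paper's implementation) that connects each image patch to its nearest neighbours in feature space:

```python
import torch


def knn_graph(patch_feats: torch.Tensor, k: int = 9) -> torch.Tensor:
    """Connect every patch (node) to its k nearest neighbours in feature space.

    patch_feats: (N, C) tensor, one row per image patch.
    Returns an (N, k) tensor of neighbour indices.
    """
    # Pairwise Euclidean distances between all patch features.
    dist = torch.cdist(patch_feats, patch_feats)  # (N, N)
    # Exclude self-loops by pushing the diagonal to infinity.
    dist.fill_diagonal_(float('inf'))
    # Each node keeps its k closest neighbours; a Grapher module would then
    # aggregate features along these edges, and an FFN would transform them.
    return dist.topk(k, largest=False).indices  # (N, k)


# Example: 14x14 = 196 patches with 192-dim features (ViG-Tiny-like sizes).
feats = torch.randn(196, 192)
edges = knn_graph(feats, k=9)
print(edges.shape)  # torch.Size([196, 9])
```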
## How to use it?
<!-- [TABS-BEGIN] -->
**Predict image**
```python
from mmpretrain import inference_model
predict = inference_model('vig-tiny_3rdparty_in1k', 'demo/bird.JPEG')
print(predict['pred_class'])
print(predict['pred_score'])
```
**Use the model**
```python
import torch
from mmpretrain import get_model
model = get_model('vig-tiny_3rdparty_in1k', pretrained=True)
inputs = torch.rand(1, 3, 224, 224)
out = model(inputs)
print(type(out))
# To extract features.
feats = model.extract_feat(inputs)
print(type(feats))
```
**Test Command**
Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset).
Test:
```shell
python tools/test.py configs/vig/vig-tiny_8xb128_in1k.py https://download.openmmlab.com/mmclassification/v0/vig/vig-tiny_3rdparty_in1k_20230117-6414c684.pth
```
<!-- [TABS-END] -->
## Models and results
### Image Classification on ImageNet-1k
| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download |
| :---------------------------- | :----------: | :--------: | :-------: | :-------: | :-------: | :----------------------------------: | :------------------------------------------------------------------------------------: |
| `vig-tiny_3rdparty_in1k`\* | From scratch | 7.18 | 1.31 | 74.40 | 92.34 | [config](vig-tiny_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vig/vig-tiny_3rdparty_in1k_20230117-6414c684.pth) |
| `vig-small_3rdparty_in1k`\* | From scratch | 22.75 | 4.54 | 80.61 | 95.28 | [config](vig-small_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vig/vig-small_3rdparty_in1k_20230117-5338bf3b.pth) |
| `vig-base_3rdparty_in1k`\* | From scratch | 20.68 | 17.68 | 82.62 | 96.04 | [config](vig-base_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vig/vig-base_3rdparty_in1k_20230117-92f6f12f.pth) |
| `pvig-tiny_3rdparty_in1k`\* | From scratch | 9.46 | 1.71 | 78.38 | 94.38 | [config](pvig-tiny_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vig/pvig-tiny_3rdparty_in1k_20230117-eb77347d.pth) |
| `pvig-small_3rdparty_in1k`\* | From scratch | 29.02 | 4.57 | 82.00 | 95.97 | [config](pvig-small_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vig/pvig-small_3rdparty_in1k_20230117-9433dc96.pth) |
| `pvig-medium_3rdparty_in1k`\* | From scratch | 51.68 | 8.89 | 83.12 | 96.35 | [config](pvig-medium_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vig/pvig-medium_3rdparty_in1k_20230117-21057a6d.pth) |
| `pvig-base_3rdparty_in1k`\* | From scratch | 95.21 | 16.86 | 83.59 | 96.52 | [config](pvig-base_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vig/pvig-base_3rdparty_in1k_20230117-dbab3c85.pth) |
*Models with \* are converted from the [official repo](https://github.com/huawei-noah/Efficient-AI-Backbones/tree/master/vig_pytorch). The config files of these models are only for inference. We haven't reproduced the training results.*
## Citation
```bibtex
@inproceedings{han2022vig,
title={Vision GNN: An Image is Worth Graph of Nodes},
author={Kai Han and Yunhe Wang and Jianyuan Guo and Yehui Tang and Enhua Wu},
booktitle={NeurIPS},
year={2022}
}
```
Collections:
- Name: VIG
Metadata:
Training Data: ImageNet-1k
Architecture:
- Vision GNN
Paper:
Title: 'Vision GNN: An Image is Worth Graph of Nodes'
URL: https://arxiv.org/abs/2206.00272
README: configs/vig/README.md
Code:
URL: null
Version: null
Models:
- Name: vig-tiny_3rdparty_in1k
Metadata:
FLOPs: 1309000000
Parameters: 7185000
Training Data: ImageNet-1k
In Collection: VIG
Results:
- Dataset: ImageNet-1k
Metrics:
Top 1 Accuracy: 74.40
Top 5 Accuracy: 92.34
Task: Image Classification
Weights: https://download.openmmlab.com/mmclassification/v0/vig/vig-tiny_3rdparty_in1k_20230117-6414c684.pth
Config: configs/vig/vig-tiny_8xb128_in1k.py
Converted From:
Weights: https://github.com/huawei-noah/Efficient-AI-Backbones/releases/download/vig/vig_ti_74.5.pth
Code: https://github.com/huawei-noah/Efficient-AI-Backbones/tree/master/vig_pytorch
- Name: vig-small_3rdparty_in1k
Metadata:
FLOPs: 4535000000
Parameters: 22748000
Training Data: ImageNet-1k
In Collection: VIG
Results:
- Dataset: ImageNet-1k
Metrics:
Top 1 Accuracy: 80.61
Top 5 Accuracy: 95.28
Task: Image Classification
Weights: https://download.openmmlab.com/mmclassification/v0/vig/vig-small_3rdparty_in1k_20230117-5338bf3b.pth
Config: configs/vig/vig-small_8xb128_in1k.py
Converted From:
Weights: https://github.com/huawei-noah/Efficient-AI-Backbones/releases/download/vig/vig_s_80.6.pth
Code: https://github.com/huawei-noah/Efficient-AI-Backbones/tree/master/vig_pytorch
- Name: vig-base_3rdparty_in1k
Metadata:
FLOPs: 17681000000
Parameters: 20685000
Training Data: ImageNet-1k
In Collection: VIG
Results:
- Dataset: ImageNet-1k
Metrics:
Top 1 Accuracy: 82.62
Top 5 Accuracy: 96.04
Task: Image Classification
Weights: https://download.openmmlab.com/mmclassification/v0/vig/vig-base_3rdparty_in1k_20230117-92f6f12f.pth
Config: configs/vig/vig-base_8xb128_in1k.py
Converted From:
Weights: https://github.com/huawei-noah/Efficient-AI-Backbones/releases/download/vig/vig_b_82.6.pth
Code: https://github.com/huawei-noah/Efficient-AI-Backbones/tree/master/vig_pytorch
- Name: pvig-tiny_3rdparty_in1k
Metadata:
FLOPs: 1714000000
Parameters: 9458000
Training Data: ImageNet-1k
In Collection: VIG
Results:
- Dataset: ImageNet-1k
Metrics:
Top 1 Accuracy: 78.38
Top 5 Accuracy: 94.38
Task: Image Classification
Weights: https://download.openmmlab.com/mmclassification/v0/vig/pvig-tiny_3rdparty_in1k_20230117-eb77347d.pth
Config: configs/vig/pvig-tiny_8xb128_in1k.py
Converted From:
Weights: https://github.com/huawei-noah/Efficient-AI-Backbones/releases/download/pyramid-vig/pvig_ti_78.5.pth.tar
Code: https://github.com/huawei-noah/Efficient-AI-Backbones/tree/master/vig_pytorch
- Name: pvig-small_3rdparty_in1k
Metadata:
FLOPs: 4572000000
Parameters: 29024000
Training Data: ImageNet-1k
In Collection: VIG
Results:
- Dataset: ImageNet-1k
Metrics:
Top 1 Accuracy: 82.00
Top 5 Accuracy: 95.97
Task: Image Classification
Weights: https://download.openmmlab.com/mmclassification/v0/vig/pvig-small_3rdparty_in1k_20230117-9433dc96.pth
Config: configs/vig/pvig-small_8xb128_in1k.py
Converted From:
Weights: https://github.com/huawei-noah/Efficient-AI-Backbones/releases/download/pyramid-vig/pvig_s_82.1.pth.tar
Code: https://github.com/huawei-noah/Efficient-AI-Backbones/tree/master/vig_pytorch
- Name: pvig-medium_3rdparty_in1k
Metadata:
FLOPs: 8886000000
Parameters: 51682000
Training Data: ImageNet-1k
In Collection: VIG
Results:
- Dataset: ImageNet-1k
Metrics:
Top 1 Accuracy: 83.12
Top 5 Accuracy: 96.35
Task: Image Classification
Weights: https://download.openmmlab.com/mmclassification/v0/vig/pvig-medium_3rdparty_in1k_20230117-21057a6d.pth
Config: configs/vig/pvig-medium_8xb128_in1k.py
Converted From:
Weights: https://github.com/huawei-noah/Efficient-AI-Backbones/releases/download/pyramid-vig/pvig_m_83.1.pth.tar
Code: https://github.com/huawei-noah/Efficient-AI-Backbones/tree/master/vig_pytorch
- Name: pvig-base_3rdparty_in1k
Metadata:
FLOPs: 16861000000
Parameters: 95213000
Training Data: ImageNet-1k
In Collection: VIG
Results:
- Dataset: ImageNet-1k
Metrics:
Top 1 Accuracy: 83.59
Top 5 Accuracy: 96.52
Task: Image Classification
Weights: https://download.openmmlab.com/mmclassification/v0/vig/pvig-base_3rdparty_in1k_20230117-dbab3c85.pth
Config: configs/vig/pvig-base_8xb128_in1k.py
Converted From:
Weights: https://github.com/huawei-noah/Efficient-AI-Backbones/releases/download/pyramid-vig/pvig_b_83.66.pth.tar
Code: https://github.com/huawei-noah/Efficient-AI-Backbones/tree/master/vig_pytorch
_base_ = [
'../_base_/models/vig/pyramid_vig_base.py',
'../_base_/datasets/imagenet_bs128_vig_224.py',
'../_base_/schedules/imagenet_bs256.py',
'../_base_/default_runtime.py',
]
# dataset settings
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='ResizeEdge',
scale=235,
edge='short',
backend='pillow',
interpolation='bicubic'),
dict(type='CenterCrop', crop_size=224),
dict(type='PackInputs'),
]
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = dict(dataset=dict(pipeline=test_pipeline))
_base_ = [
'../_base_/models/vig/pyramid_vig_medium.py',
'../_base_/datasets/imagenet_bs128_vig_224.py',
'../_base_/schedules/imagenet_bs256.py',
'../_base_/default_runtime.py',
]
_base_ = [
'../_base_/models/vig/pyramid_vig_small.py',
'../_base_/datasets/imagenet_bs128_vig_224.py',
'../_base_/schedules/imagenet_bs256.py',
'../_base_/default_runtime.py',
]
_base_ = [
'../_base_/models/vig/pyramid_vig_tiny.py',
'../_base_/datasets/imagenet_bs128_vig_224.py',
'../_base_/schedules/imagenet_bs256.py',
'../_base_/default_runtime.py',
]
_base_ = [
'../_base_/models/vig/vig_base.py',
'../_base_/datasets/imagenet_bs128_vig_224.py',
'../_base_/schedules/imagenet_bs256.py',
'../_base_/default_runtime.py',
]
_base_ = [
'../_base_/models/vig/vig_small.py',
'../_base_/datasets/imagenet_bs128_vig_224.py',
'../_base_/schedules/imagenet_bs256.py',
'../_base_/default_runtime.py',
]
_base_ = [
'../_base_/models/vig/vig_tiny.py',
'../_base_/datasets/imagenet_bs128_vig_224.py',
'../_base_/schedules/imagenet_bs256.py',
'../_base_/default_runtime.py',
]
# Vision Transformer
> [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929)
<!-- [ALGORITHM] -->
## Introduction
**Vision Transformer**, known as **ViT**, succeeded in using a pure transformer to outperform previous works based on convolutional networks in the vision field. ViT splits an image into patches that are fed to multi-head attention, concatenates a learnable class token for the final prediction, and adds learnable position embeddings to encode the relative positions of the patches. Building on these three attention-based techniques, ViT provides a brand-new pattern for designing backbone structures in vision.
The strategy works even better when coupled with pre-training on large datasets. Because of its simplicity and effectiveness, many later classification works originate from ViT, and ViT-based methods still play an important role in the recent multi-modality field.
<div align=center>
<img src="https://user-images.githubusercontent.com/26739999/142579081-b5718032-6581-472b-8037-ea66aaa9e278.png" width="70%"/>
</div>
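The three ingredients above can be illustrated with a short, self-contained PyTorch sketch (hypothetical class name and sizes; this is not mmpretrain's `VisionTransformer` implementation):

```python
import torch
import torch.nn as nn


class PatchEmbedSketch(nn.Module):
    """Toy illustration of ViT's input pipeline: patches, class token, position embeddings."""

    def __init__(self, img_size=224, patch_size=16, embed_dim=768):
        super().__init__()
        num_patches = (img_size // patch_size) ** 2
        # A strided convolution splits the image into non-overlapping patches
        # and linearly projects each patch to the embedding dimension.
        self.proj = nn.Conv2d(3, embed_dim, kernel_size=patch_size, stride=patch_size)
        # Learnable class token prepended to the patch sequence.
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        # Learnable position embeddings for the class token plus all patches.
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))

    def forward(self, x):
        x = self.proj(x).flatten(2).transpose(1, 2)      # (B, num_patches, embed_dim)
        cls = self.cls_token.expand(x.shape[0], -1, -1)  # (B, 1, embed_dim)
        # The resulting token sequence is what the transformer encoder consumes.
        return torch.cat([cls, x], dim=1) + self.pos_embed


tokens = PatchEmbedSketch()(torch.rand(1, 3, 224, 224))
print(tokens.shape)  # torch.Size([1, 197, 768])
```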
## Abstract
<details>
<summary>Show the paper's abstract</summary>
<br>
While the Transformer architecture has become the de-facto standard for natural language processing tasks, its applications to computer vision remain limited. In vision, attention is either applied in conjunction with convolutional networks, or used to replace certain components of convolutional networks while keeping their overall structure in place. We show that this reliance on CNNs is not necessary and a pure transformer applied directly to sequences of image patches can perform very well on image classification tasks. When pre-trained on large amounts of data and transferred to multiple mid-sized or small image recognition benchmarks (ImageNet, CIFAR-100, VTAB, etc.), Vision Transformer (ViT) attains excellent results compared to state-of-the-art convolutional networks while requiring substantially fewer computational resources to train.
</br>
</details>
## How to use it?
<!-- [TABS-BEGIN] -->
**Predict image**
```python
from mmpretrain import inference_model
predict = inference_model('vit-base-p32_in21k-pre_3rdparty_in1k-384px', 'demo/bird.JPEG')
print(predict['pred_class'])
print(predict['pred_score'])
```
**Use the model**
```python
import torch
from mmpretrain import get_model
model = get_model('vit-base-p32_in21k-pre_3rdparty_in1k-384px', pretrained=True)
inputs = torch.rand(1, 3, 224, 224)
out = model(inputs)
print(type(out))
# To extract features.
feats = model.extract_feat(inputs)
print(type(feats))
```
**Train/Test Command**
Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset).
Train:
```shell
python tools/train.py configs/vision_transformer/vit-base-p16_32xb128-mae_in1k.py
```
Test:
```shell
python tools/test.py configs/vision_transformer/vit-base-p32_64xb64_in1k-384px.py https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-base-p32_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-9cea8599.pth
```
<!-- [TABS-END] -->
## Models and results
### Image Classification on ImageNet-1k
| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download |
| :---------------------------------------------- | :----------: | :--------: | :-------: | :-------: | :-------: | :------------------------------------------: | :----------------------------------------------------------: |
| `vit-base-p32_in21k-pre_3rdparty_in1k-384px`\* | ImageNet-21k | 88.30 | 13.06 | 84.01 | 97.08 | [config](vit-base-p32_64xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-base-p32_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-9cea8599.pth) |
| `vit-base-p16_32xb128-mae_in1k` | From scratch | 86.57 | 17.58 | 82.37 | 96.15 | [config](vit-base-p16_32xb128-mae_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vit/vit-base-p16_pt-32xb128-mae_in1k_20220623-4c544545.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/vit/vit-base-p16_pt-32xb128-mae_in1k_20220623-4c544545.log) |
| `vit-base-p16_in21k-pre_3rdparty_in1k-384px`\* | ImageNet-21k | 86.86 | 55.54 | 85.43 | 97.77 | [config](vit-base-p16_64xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-base-p16_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-98e8652b.pth) |
| `vit-large-p16_in21k-pre_3rdparty_in1k-384px`\* | ImageNet-21k | 304.72 | 191.21 | 85.63 | 97.63 | [config](vit-large-p16_64xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-large-p16_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-b20ba619.pth) |
*Models with \* are converted from the [official repo](https://github.com/google-research/vision_transformer/blob/88a52f8892c80c10de99194990a517b4d80485fd/vit_jax/models.py#L208). The config files of these models are only for inference. We haven't reproduced the training results.*
## Citation
```bibtex
@inproceedings{
dosovitskiy2021an,
title={An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale},
author={Alexey Dosovitskiy and Lucas Beyer and Alexander Kolesnikov and Dirk Weissenborn and Xiaohua Zhai and Thomas Unterthiner and Mostafa Dehghani and Matthias Minderer and Georg Heigold and Sylvain Gelly and Jakob Uszkoreit and Neil Houlsby},
booktitle={International Conference on Learning Representations},
year={2021},
url={https://openreview.net/forum?id=YicbFdNTTy}
}
```
Collections:
- Name: Vision Transformer
Metadata:
Architecture:
- Attention Dropout
- Convolution
- Dense Connections
- Dropout
- GELU
- Layer Normalization
- Multi-Head Attention
- Scaled Dot-Product Attention
- Tanh Activation
Paper:
Title: 'An Image is Worth 16x16 Words: Transformers for Image Recognition at
Scale'
URL: https://arxiv.org/abs/2010.11929
README: configs/vision_transformer/README.md
Code:
URL: https://github.com/open-mmlab/mmpretrain/blob/v0.17.0/mmcls/models/backbones/vision_transformer.py
Version: v0.17.0
Models:
- Name: vit-base-p32_in21k-pre_3rdparty_in1k-384px
Metadata:
FLOPs: 13056716544
Parameters: 88297192
Training Data:
- ImageNet-21k
- ImageNet-1k
In Collection: Vision Transformer
Results:
- Dataset: ImageNet-1k
Task: Image Classification
Metrics:
Top 1 Accuracy: 84.01
Top 5 Accuracy: 97.08
Weights: https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-base-p32_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-9cea8599.pth
Config: configs/vision_transformer/vit-base-p32_64xb64_in1k-384px.py
Converted From:
Weights: https://console.cloud.google.com/storage/browser/_details/vit_models/augreg/B_32-i21k-300ep-lr_0.001-aug_light1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz
Code: https://github.com/google-research/vision_transformer/blob/88a52f8892c80c10de99194990a517b4d80485fd/vit_jax/models.py#L208
- Name: vit-base-p16_32xb128-mae_in1k
Metadata:
FLOPs: 17581972224
Parameters: 86567656
Training Data:
- ImageNet-1k
In Collection: Vision Transformer
Results:
- Dataset: ImageNet-1k
Task: Image Classification
Metrics:
Top 1 Accuracy: 82.37
Top 5 Accuracy: 96.15
Weights: https://download.openmmlab.com/mmclassification/v0/vit/vit-base-p16_pt-32xb128-mae_in1k_20220623-4c544545.pth
Config: configs/vision_transformer/vit-base-p16_32xb128-mae_in1k.py
- Name: vit-base-p16_in21k-pre_3rdparty_in1k-384px
Metadata:
FLOPs: 55538974464
Parameters: 86859496
Training Data:
- ImageNet-21k
- ImageNet-1k
In Collection: Vision Transformer
Results:
- Dataset: ImageNet-1k
Task: Image Classification
Metrics:
Top 1 Accuracy: 85.43
Top 5 Accuracy: 97.77
Weights: https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-base-p16_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-98e8652b.pth
Config: configs/vision_transformer/vit-base-p16_64xb64_in1k-384px.py
Converted From:
Weights: https://console.cloud.google.com/storage/browser/_details/vit_models/augreg/B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz
Code: https://github.com/google-research/vision_transformer/blob/88a52f8892c80c10de99194990a517b4d80485fd/vit_jax/models.py#L208
- Name: vit-large-p16_in21k-pre_3rdparty_in1k-384px
Metadata:
FLOPs: 191210034176
Parameters: 304715752
Training Data:
- ImageNet-21k
- ImageNet-1k
In Collection: Vision Transformer
Results:
- Dataset: ImageNet-1k
Task: Image Classification
Metrics:
Top 1 Accuracy: 85.63
Top 5 Accuracy: 97.63
Weights: https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-large-p16_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-b20ba619.pth
Config: configs/vision_transformer/vit-large-p16_64xb64_in1k-384px.py
Converted From:
Weights: https://console.cloud.google.com/storage/browser/_details/vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_strong1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz
Code: https://github.com/google-research/vision_transformer/blob/88a52f8892c80c10de99194990a517b4d80485fd/vit_jax/models.py#L208
_base_ = [
'../_base_/datasets/imagenet_bs64_swin_224.py',
'../_base_/schedules/imagenet_bs1024_adamw_swin.py',
'../_base_/default_runtime.py'
]
# model settings
model = dict(
type='ImageClassifier',
backbone=dict(
type='VisionTransformer',
arch='base',
img_size=224,
patch_size=16,
drop_path_rate=0.1),
neck=None,
head=dict(
type='VisionTransformerClsHead',
num_classes=1000,
in_channels=768,
loss=dict(
type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
),
init_cfg=[
dict(type='TruncNormal', layer='Linear', std=.02),
dict(type='Constant', layer='LayerNorm', val=1., bias=0.),
],
train_cfg=dict(augments=[
dict(type='Mixup', alpha=0.8),
dict(type='CutMix', alpha=1.0)
]))
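# A rough sketch of what these augments do (not the exact implementation):
#   Mixup:  x = lam * x_i + (1 - lam) * x_j, lam ~ Beta(0.8, 0.8); labels mixed with the same lam.
#   CutMix: a random patch of x_j is pasted into x_i; labels are mixed by the patch area ratio.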
# dataset settings
train_dataloader = dict(batch_size=128)
# schedule settings
optim_wrapper = dict(
optimizer=dict(
type='AdamW',
lr=1e-4 * 4096 / 256,
weight_decay=0.3,
eps=1e-8,
betas=(0.9, 0.95)),
paramwise_cfg=dict(
norm_decay_mult=0.0,
bias_decay_mult=0.0,
custom_keys={
'.cls_token': dict(decay_mult=0.0),
'.pos_embed': dict(decay_mult=0.0)
}))
# runtime settings
custom_hooks = [dict(type='EMAHook', momentum=1e-4)]
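# EMAHook keeps an exponential moving average of the model weights; a sketch of
# the update rule, assuming mmengine's ExponentialMovingAverage semantics:
#   ema_param = (1 - momentum) * ema_param + momentum * param, here with momentum = 1e-4.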
# NOTE: `auto_scale_lr` is for automatically scaling LR
# based on the actual training batch size.
# base_batch_size = (32 GPUs) x (128 samples per GPU)
auto_scale_lr = dict(base_batch_size=4096)
_base_ = [
'../_base_/models/vit-base-p16.py',
'../_base_/datasets/imagenet_bs64_pil_resize_autoaug.py',
'../_base_/default_runtime.py'
]
# specific to vit pretrain
paramwise_cfg = dict(custom_keys={
'.cls_token': dict(decay_mult=0.0),
'.pos_embed': dict(decay_mult=0.0)
})
pretrained = 'https://download.openmmlab.com/mmclassification/v0/vit/pretrain/vit-base-p16_3rdparty_pt-64xb64_in1k-224_20210928-02284250.pth' # noqa
model = dict(
head=dict(
loss=dict(type='CrossEntropyLoss', loss_weight=1.0, _delete_=True), ),
backbone=dict(
img_size=224,
init_cfg=dict(
type='Pretrained',
checkpoint=pretrained,
_delete_=True,
prefix='backbone')))
img_norm_cfg = dict(
mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='RandomResizedCrop', scale=224, backend='pillow'),
dict(type='RandomFlip', prob=0.5, direction='horizontal'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='ToTensor', keys=['gt_label']),
dict(type='ToHalf', keys=['img']),
dict(type='Collect', keys=['img', 'gt_label'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(224, -1), keep_ratio=True, backend='pillow'),
dict(type='CenterCrop', crop_size=224),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='ToHalf', keys=['img']),
dict(type='Collect', keys=['img'])
]
# change batch size
data = dict(
samples_per_gpu=17,
workers_per_gpu=16,
drop_last=True,
train=dict(pipeline=train_pipeline),
train_dataloader=dict(mode='async'),
val=dict(pipeline=test_pipeline, ),
val_dataloader=dict(samples_per_gpu=4, workers_per_gpu=1),
test=dict(pipeline=test_pipeline),
test_dataloader=dict(samples_per_gpu=4, workers_per_gpu=1))
# optimizer
optimizer = dict(
type='SGD',
lr=0.08,
weight_decay=1e-5,
momentum=0.9,
paramwise_cfg=paramwise_cfg,
)
# learning policy
param_scheduler = [
dict(type='LinearLR', start_factor=0.02, by_epoch=False, begin=0, end=800),
dict(
type='CosineAnnealingLR',
T_max=4200,
by_epoch=False,
begin=800,
end=5000)
]
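# In effect: linear warmup from 0.02 * lr up to lr over the first 800 iterations,
# followed by cosine decay from iteration 800 to 5000.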
# ipu cfg
# model partition config
ipu_model_cfg = dict(
train_split_edges=[
dict(layer_to_call='backbone.patch_embed', ipu_id=0),
dict(layer_to_call='backbone.layers.3', ipu_id=1),
dict(layer_to_call='backbone.layers.6', ipu_id=2),
dict(layer_to_call='backbone.layers.9', ipu_id=3)
],
train_ckpt_nodes=['backbone.layers.{}'.format(i) for i in range(12)])
# device config
options_cfg = dict(
randomSeed=42,
partialsType='half',
train_cfg=dict(
executionStrategy='SameAsIpu',
Training=dict(gradientAccumulation=32),
availableMemoryProportion=[0.3, 0.3, 0.3, 0.3],
),
eval_cfg=dict(deviceIterations=1, ),
)
# add model partition config and device config to runner
runner = dict(
type='IterBasedRunner',
ipu_model_cfg=ipu_model_cfg,
options_cfg=options_cfg,
max_iters=5000)
default_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=1000))
fp16 = dict(loss_scale=256.0, velocity_accum_type='half', accum_type='half')
_base_ = [
'../_base_/models/vit-base-p16.py',
'../_base_/datasets/imagenet_bs64_pil_resize.py',
'../_base_/schedules/imagenet_bs4096_AdamW.py',
'../_base_/default_runtime.py'
]
# model setting
model = dict(backbone=dict(img_size=384))
# dataset setting
data_preprocessor = dict(
mean=[127.5, 127.5, 127.5],
std=[127.5, 127.5, 127.5],
# convert image from BGR to RGB
to_rgb=True,
)
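# With mean = std = 127.5, the preprocessor maps uint8 pixels to roughly [-1, 1]:
# x_normalized = (x - 127.5) / 127.5.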
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='RandomResizedCrop', scale=384, backend='pillow'),
dict(type='RandomFlip', prob=0.5, direction='horizontal'),
dict(type='PackInputs'),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='ResizeEdge', scale=384, edge='short', backend='pillow'),
dict(type='CenterCrop', crop_size=384),
dict(type='PackInputs'),
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = dict(dataset=dict(pipeline=test_pipeline))
# schedule setting
optim_wrapper = dict(clip_grad=dict(max_norm=1.0))
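# `clip_grad=dict(max_norm=1.0)` rescales gradients whose global L2 norm exceeds 1.0,
# roughly: g <- g * min(1, 1.0 / ||g||_2), with the norm computed over all parameters jointly.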
_base_ = [
'../_base_/models/vit-base-p16.py',
'../_base_/datasets/imagenet_bs64_pil_resize_autoaug.py',
'../_base_/schedules/imagenet_bs4096_AdamW.py',
'../_base_/default_runtime.py'
]
# model setting
model = dict(
head=dict(hidden_dim=3072),
train_cfg=dict(augments=dict(type='Mixup', alpha=0.2)),
)
# schedule setting
optim_wrapper = dict(clip_grad=dict(max_norm=1.0))