From 0fd8347d797c0882c759802497078b5823d03d0a Mon Sep 17 00:00:00 2001
From: unknown <365893829@qq.com>
Date: Sun, 8 Jan 2023 11:20:38 +0800
Subject: [PATCH] Add mmclassification-0.24.1 code, remove mmclassification-speed-benchmark
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../mmclassification-0.24.1/.gitattributes | 2 +
.../mmclassification-0.24.1/.gitignore | 134 +
.../.pre-commit-config.yaml | 58 +
.../mmclassification-0.24.1/.readthedocs.yml | 9 +
.../mmclassification-0.24.1/CITATION.cff | 9 +
.../mmclassification-0.24.1/CONTRIBUTING.md | 61 +
.../LICENSE | 2 +-
.../mmclassification-0.24.1/MANIFEST.in | 4 +
.../mmclassification-0.24.1/README.md | 207 ++
.../mmclassification-0.24.1/README_zh-CN.md | 222 ++
.../configs/_base_/datasets/cifar100_bs16.py | 0
.../configs/_base_/datasets/cifar10_bs16.py | 0
.../configs/_base_/datasets/cub_bs8_384.py | 54 +
.../configs/_base_/datasets/cub_bs8_448.py | 54 +
.../_base_/datasets/imagenet21k_bs128.py | 43 +
.../imagenet_bs128_poolformer_medium_224.py | 71 +
.../imagenet_bs128_poolformer_small_224.py | 71 +
.../_base_/datasets/imagenet_bs256_rsb_a12.py | 53 +
.../_base_/datasets/imagenet_bs256_rsb_a3.py | 53 +
.../configs/_base_/datasets/imagenet_bs32.py | 40 +
.../datasets/imagenet_bs32_pil_bicubic.py | 48 +
.../datasets/imagenet_bs32_pil_resize.py | 6 +-
.../configs/_base_/datasets/imagenet_bs64.py | 0
.../_base_/datasets/imagenet_bs64_autoaug.py | 43 +
.../datasets/imagenet_bs64_convmixer_224.py | 71 +
.../datasets/imagenet_bs64_mixer_224.py | 48 +
.../datasets/imagenet_bs64_pil_resize.py | 4 +-
.../imagenet_bs64_pil_resize_autoaug.py | 53 +
.../_base_/datasets/imagenet_bs64_swin_224.py | 71 +
.../_base_/datasets/imagenet_bs64_swin_256.py | 71 +
.../_base_/datasets/imagenet_bs64_swin_384.py | 43 +
.../_base_/datasets/imagenet_bs64_t2t_224.py | 71 +
.../_base_/datasets/pipelines/auto_aug.py | 96 +
.../_base_/datasets/pipelines/rand_aug.py | 43 +
.../_base_/datasets/stanford_cars_bs8_448.py | 46 +
.../configs/_base_/datasets/voc_bs16.py | 0
.../configs/_base_/default_runtime.py | 0
.../_base_/models/conformer/base-p16.py | 22 +
.../_base_/models/conformer/small-p16.py | 22 +
.../_base_/models/conformer/small-p32.py | 26 +
.../_base_/models/conformer/tiny-p16.py | 22 +
.../models/convmixer/convmixer-1024-20.py | 11 +
.../models/convmixer/convmixer-1536-20.py | 11 +
.../models/convmixer/convmixer-768-32.py | 11 +
.../_base_/models/convnext/convnext-base.py | 23 +
.../_base_/models/convnext/convnext-large.py | 23 +
.../_base_/models/convnext/convnext-small.py | 23 +
.../_base_/models/convnext/convnext-tiny.py | 23 +
.../_base_/models/convnext/convnext-xlarge.py | 23 +
.../_base_/models/densenet/densenet121.py | 11 +
.../_base_/models/densenet/densenet161.py | 11 +
.../_base_/models/densenet/densenet169.py | 11 +
.../_base_/models/densenet/densenet201.py | 11 +
.../configs/_base_/models/efficientnet_b0.py | 12 +
.../configs/_base_/models/efficientnet_b1.py | 12 +
.../configs/_base_/models/efficientnet_b2.py | 12 +
.../configs/_base_/models/efficientnet_b3.py | 12 +
.../configs/_base_/models/efficientnet_b4.py | 12 +
.../configs/_base_/models/efficientnet_b5.py | 12 +
.../configs/_base_/models/efficientnet_b6.py | 12 +
.../configs/_base_/models/efficientnet_b7.py | 12 +
.../configs/_base_/models/efficientnet_b8.py | 12 +
.../configs/_base_/models/efficientnet_em.py | 13 +
.../configs/_base_/models/efficientnet_es.py | 13 +
.../_base_/models/hornet/hornet-base-gf.py | 21 +
.../_base_/models/hornet/hornet-base.py | 21 +
.../_base_/models/hornet/hornet-large-gf.py | 21 +
.../models/hornet/hornet-large-gf384.py | 17 +
.../_base_/models/hornet/hornet-large.py | 21 +
.../_base_/models/hornet/hornet-small-gf.py | 21 +
.../_base_/models/hornet/hornet-small.py | 21 +
.../_base_/models/hornet/hornet-tiny-gf.py | 21 +
.../_base_/models/hornet/hornet-tiny.py | 21 +
.../configs/_base_/models/hrnet/hrnet-w18.py | 15 +
.../configs/_base_/models/hrnet/hrnet-w30.py | 15 +
.../configs/_base_/models/hrnet/hrnet-w32.py | 15 +
.../configs/_base_/models/hrnet/hrnet-w40.py | 15 +
.../configs/_base_/models/hrnet/hrnet-w44.py | 15 +
.../configs/_base_/models/hrnet/hrnet-w48.py | 15 +
.../configs/_base_/models/hrnet/hrnet-w64.py | 15 +
.../_base_/models/mlp_mixer_base_patch16.py | 25 +
.../_base_/models/mlp_mixer_large_patch16.py | 25 +
.../configs/_base_/models/mobilenet_v2_1x.py | 0
.../models/mobilenet_v3_large_imagenet.py | 16 +
.../_base_/models/mobilenet_v3_small_cifar.py | 13 +
.../models/mobilenet_v3_small_imagenet.py | 16 +
.../configs/_base_/models/mvit/mvitv2-base.py | 19 +
.../_base_/models/mvit/mvitv2-large.py | 23 +
.../_base_/models/mvit/mvitv2-small.py | 19 +
.../configs/_base_/models/mvit/mvitv2-tiny.py | 19 +
.../models/poolformer/poolformer_m36.py | 22 +
.../models/poolformer/poolformer_m48.py | 22 +
.../models/poolformer/poolformer_s12.py | 22 +
.../models/poolformer/poolformer_s24.py | 22 +
.../models/poolformer/poolformer_s36.py | 22 +
.../_base_/models/regnet/regnetx_1.6gf.py | 0
.../_base_/models/regnet/regnetx_12gf.py | 0
.../_base_/models/regnet/regnetx_3.2gf.py | 0
.../_base_/models/regnet/regnetx_4.0gf.py | 0
.../_base_/models/regnet/regnetx_400mf.py | 0
.../_base_/models/regnet/regnetx_6.4gf.py | 0
.../_base_/models/regnet/regnetx_8.0gf.py | 0
.../_base_/models/regnet/regnetx_800mf.py | 0
.../configs/_base_/models/repmlp-base_224.py | 18 +
.../configs/_base_/models/repvgg-A0_in1k.py | 15 +
.../_base_/models/repvgg-B3_lbs-mixup_in1k.py | 23 +
.../_base_/models/res2net101-w26-s4.py | 18 +
.../configs/_base_/models/res2net50-w14-s8.py | 18 +
.../configs/_base_/models/res2net50-w26-s4.py | 18 +
.../configs/_base_/models/res2net50-w26-s6.py | 18 +
.../configs/_base_/models/res2net50-w26-s8.py | 18 +
.../configs/_base_/models/res2net50-w48-s2.py | 18 +
.../configs/_base_/models/resnest101.py | 24 +
.../configs/_base_/models/resnest200.py | 24 +
.../configs/_base_/models/resnest269.py | 24 +
.../configs/_base_/models/resnest50.py | 23 +
.../configs/_base_/models/resnet101.py | 0
.../configs/_base_/models/resnet101_cifar.py | 0
.../configs/_base_/models/resnet152.py | 0
.../configs/_base_/models/resnet152_cifar.py | 0
.../configs/_base_/models/resnet18.py | 0
.../configs/_base_/models/resnet18_cifar.py | 0
.../configs/_base_/models/resnet34.py | 0
.../configs/_base_/models/resnet34_cifar.py | 0
.../configs/_base_/models/resnet34_gem.py | 17 +
.../configs/_base_/models/resnet50.py | 0
.../configs/_base_/models/resnet50_cifar.py | 0
.../_base_/models/resnet50_cifar_cutmix.py | 0
.../_base_/models/resnet50_cifar_mixup.py | 0
.../configs/_base_/models/resnet50_cutmix.py | 0
.../_base_/models/resnet50_label_smooth.py | 0
.../configs/_base_/models/resnet50_mixup.py | 0
.../configs/_base_/models/resnetv1c50.py | 17 +
.../configs/_base_/models/resnetv1d101.py | 0
.../configs/_base_/models/resnetv1d152.py | 0
.../configs/_base_/models/resnetv1d50.py | 0
.../configs/_base_/models/resnext101_32x4d.py | 0
.../configs/_base_/models/resnext101_32x8d.py | 0
.../configs/_base_/models/resnext152_32x4d.py | 0
.../configs/_base_/models/resnext50_32x4d.py | 0
.../configs/_base_/models/seresnet101.py | 0
.../configs/_base_/models/seresnet50.py | 0
.../_base_/models/seresnext101_32x4d.py | 0
.../_base_/models/seresnext50_32x4d.py | 0
.../configs/_base_/models/shufflenet_v1_1x.py | 0
.../configs/_base_/models/shufflenet_v2_1x.py | 0
.../models/swin_transformer/base_224.py | 22 +
.../models/swin_transformer/base_384.py | 16 +
.../models/swin_transformer/large_224.py | 12 +
.../models/swin_transformer/large_384.py | 16 +
.../models/swin_transformer/small_224.py | 23 +
.../models/swin_transformer/tiny_224.py | 22 +
.../models/swin_transformer_v2/base_256.py | 25 +
.../models/swin_transformer_v2/base_384.py | 17 +
.../models/swin_transformer_v2/large_256.py | 16 +
.../models/swin_transformer_v2/large_384.py | 16 +
.../models/swin_transformer_v2/small_256.py | 25 +
.../models/swin_transformer_v2/tiny_256.py | 25 +
.../configs/_base_/models/t2t-vit-t-14.py | 41 +
.../configs/_base_/models/t2t-vit-t-19.py | 41 +
.../configs/_base_/models/t2t-vit-t-24.py | 41 +
.../_base_/models/tnt_s_patch16_224.py | 29 +
.../configs/_base_/models/twins_pcpvt_base.py | 30 +
.../configs/_base_/models/twins_svt_base.py | 30 +
.../configs/_base_/models/van/van_b0.py | 21 +
.../configs/_base_/models/van/van_b1.py | 21 +
.../configs/_base_/models/van/van_b2.py | 13 +
.../configs/_base_/models/van/van_b3.py | 13 +
.../configs/_base_/models/van/van_b4.py | 13 +
.../configs/_base_/models/van/van_b5.py | 13 +
.../configs/_base_/models/van/van_b6.py | 13 +
.../configs/_base_/models/van/van_base.py | 1 +
.../configs/_base_/models/van/van_large.py | 1 +
.../configs/_base_/models/van/van_small.py | 1 +
.../configs/_base_/models/van/van_tiny.py | 1 +
.../configs/_base_/models/vgg11.py | 0
.../configs/_base_/models/vgg11bn.py | 0
.../configs/_base_/models/vgg13.py | 0
.../configs/_base_/models/vgg13bn.py | 0
.../configs/_base_/models/vgg16.py | 0
.../configs/_base_/models/vgg16bn.py | 0
.../configs/_base_/models/vgg19.py | 0
.../configs/_base_/models/vgg19bn.py | 0
.../configs/_base_/models/vit-base-p16.py | 25 +
.../configs/_base_/models/vit-base-p32.py | 24 +
.../configs/_base_/models/vit-large-p16.py | 24 +
.../configs/_base_/models/vit-large-p32.py | 24 +
.../configs/_base_/models/wide-resnet50.py | 20 +
.../configs/_base_/schedules/cifar10_bs128.py | 0
.../configs/_base_/schedules/cub_bs64.py | 13 +
.../imagenet_bs1024_adamw_conformer.py | 29 +
.../schedules/imagenet_bs1024_adamw_swin.py | 30 +
.../_base_/schedules/imagenet_bs1024_coslr.py | 12 +
.../imagenet_bs1024_linearlr_bn_nowd.py | 0
.../_base_/schedules/imagenet_bs2048.py | 0
.../_base_/schedules/imagenet_bs2048_AdamW.py | 0
.../_base_/schedules/imagenet_bs2048_coslr.py | 0
.../_base_/schedules/imagenet_bs2048_rsb.py | 12 +
.../_base_/schedules/imagenet_bs256.py | 0
.../_base_/schedules/imagenet_bs256_140e.py | 0
.../imagenet_bs256_200e_coslr_warmup.py | 11 +
.../_base_/schedules/imagenet_bs256_coslr.py | 0
.../schedules/imagenet_bs256_epochstep.py | 0
.../_base_/schedules/imagenet_bs4096_AdamW.py | 24 +
.../_base_/schedules/stanford_cars_bs8.py | 7 +
.../configs/conformer/README.md | 37 +
.../conformer-base-p16_8xb128_in1k.py | 9 +
.../conformer-small-p16_8xb128_in1k.py | 9 +
.../conformer-small-p32_8xb128_in1k.py | 9 +
.../conformer-tiny-p16_8xb128_in1k.py | 9 +
.../configs/conformer/metafile.yml | 78 +
.../configs/convmixer/README.md | 42 +
.../convmixer-1024-20_10xb64_in1k.py | 10 +
.../convmixer-1536-20_10xb64_in1k.py | 10 +
.../convmixer/convmixer-768-32_10xb64_in1k.py | 10 +
.../configs/convmixer/metafile.yml | 61 +
.../configs/convnext/README.md | 59 +
.../convnext/convnext-base_32xb128_in1k.py | 12 +
.../convnext/convnext-large_64xb64_in1k.py | 12 +
.../convnext/convnext-small_32xb128_in1k.py | 12 +
.../convnext/convnext-tiny_32xb128_in1k.py | 12 +
.../convnext/convnext-xlarge_64xb64_in1k.py | 12 +
.../configs/convnext/metafile.yml | 221 ++
.../configs/cspnet/README.md | 41 +
.../configs/cspnet/cspdarknet50_8xb32_in1k.py | 65 +
.../configs/cspnet/cspresnet50_8xb32_in1k.py | 66 +
.../configs/cspnet/cspresnext50_8xb32_in1k.py | 65 +
.../configs/cspnet/metafile.yml | 64 +
.../configs/csra/README.md | 36 +
.../configs/csra/metafile.yml | 29 +
.../csra/resnet101-csra_1xb16_voc07-448px.py | 75 +
.../configs/deit/README.md | 52 +
...eit-base-distilled_ft-16xb32_in1k-384px.py | 9 +
.../deit-base-distilled_pt-16xb64_in1k.py | 10 +
.../deit/deit-base_ft-16xb32_in1k-384px.py | 29 +
.../configs/deit/deit-base_pt-16xb64_in1k.py | 13 +
.../deit-small-distilled_pt-4xb256_in1k.py | 7 +
.../configs/deit/deit-small_pt-4xb256_in1k.py | 44 +
.../deit-tiny-distilled_pt-4xb256_in1k.py | 7 +
.../configs/deit/deit-tiny_pt-4xb256_in1k.py | 7 +
.../configs/deit/metafile.yml | 153 ++
.../configs/densenet/README.md | 41 +
.../densenet/densenet121_4xb256_in1k.py | 10 +
.../densenet/densenet161_4xb256_in1k.py | 10 +
.../densenet/densenet169_4xb256_in1k.py | 10 +
.../densenet/densenet201_4xb256_in1k.py | 10 +
.../configs/densenet/metafile.yml | 76 +
.../configs/efficientformer/README.md | 47 +
.../efficientformer-l1_8xb128_in1k.py | 24 +
.../efficientformer-l3_8xb128_in1k.py | 24 +
.../efficientformer-l7_8xb128_in1k.py | 24 +
.../configs/efficientformer/metafile.yml | 67 +
.../configs/efficientnet/README.md | 62 +
.../efficientnet-b0_8xb32-01norm_in1k.py | 39 +
.../efficientnet-b0_8xb32_in1k.py | 39 +
.../efficientnet-b1_8xb32-01norm_in1k.py | 39 +
.../efficientnet-b1_8xb32_in1k.py | 39 +
.../efficientnet-b2_8xb32-01norm_in1k.py | 39 +
.../efficientnet-b2_8xb32_in1k.py | 39 +
.../efficientnet-b3_8xb32-01norm_in1k.py | 39 +
.../efficientnet-b3_8xb32_in1k.py | 39 +
.../efficientnet-b4_8xb32-01norm_in1k.py | 39 +
.../efficientnet-b4_8xb32_in1k.py | 39 +
.../efficientnet-b5_8xb32-01norm_in1k.py | 39 +
.../efficientnet-b5_8xb32_in1k.py | 39 +
.../efficientnet-b6_8xb32-01norm_in1k.py | 39 +
.../efficientnet-b6_8xb32_in1k.py | 39 +
.../efficientnet-b7_8xb32-01norm_in1k.py | 39 +
.../efficientnet-b7_8xb32_in1k.py | 39 +
.../efficientnet-b8_8xb32-01norm_in1k.py | 39 +
.../efficientnet-b8_8xb32_in1k.py | 39 +
.../efficientnet-em_8xb32-01norm_in1k.py | 39 +
.../efficientnet-es_8xb32-01norm_in1k.py | 39 +
.../configs/efficientnet/metafile.yml | 391 +++
.../resnet50_b32x8_fp16_dynamic_imagenet.py | 6 +
.../fp16/resnet50_b32x8_fp16_imagenet.py | 6 +
.../configs/hornet/README.md | 51 +
.../hornet/hornet-base-gf_8xb64_in1k.py | 13 +
.../configs/hornet/hornet-base_8xb64_in1k.py | 13 +
.../hornet/hornet-small-gf_8xb64_in1k.py | 13 +
.../configs/hornet/hornet-small_8xb64_in1k.py | 13 +
.../hornet/hornet-tiny-gf_8xb128_in1k.py | 13 +
.../configs/hornet/hornet-tiny_8xb128_in1k.py | 13 +
.../configs/hornet/metafile.yml | 97 +
.../configs/hrnet/README.md | 44 +
.../configs/hrnet/hrnet-w18_4xb32_in1k.py | 6 +
.../configs/hrnet/hrnet-w30_4xb32_in1k.py | 6 +
.../configs/hrnet/hrnet-w32_4xb32_in1k.py | 6 +
.../configs/hrnet/hrnet-w40_4xb32_in1k.py | 6 +
.../configs/hrnet/hrnet-w44_4xb32_in1k.py | 6 +
.../configs/hrnet/hrnet-w48_4xb32_in1k.py | 6 +
.../configs/hrnet/hrnet-w64_4xb32_in1k.py | 6 +
.../configs/hrnet/metafile.yml | 162 ++
.../configs/lenet/README.md | 28 +
.../configs/lenet/lenet5_mnist.py | 0
.../configs/mlp_mixer/README.md | 37 +
.../configs/mlp_mixer/metafile.yml | 50 +
.../mlp-mixer-base-p16_64xb64_in1k.py | 6 +
.../mlp-mixer-large-p16_64xb64_in1k.py | 6 +
.../configs/mobilenet_v2/README.md | 38 +
.../configs/mobilenet_v2/metafile.yml | 34 +
.../mobilenet_v2/mobilenet-v2_8xb32_in1k.py | 8 +
.../mobilenet_v2_b32x8_imagenet.py | 6 +
.../configs/mobilenet_v3/README.md | 36 +
.../configs/mobilenet_v3/metafile.yml | 47 +
.../mobilenet-v3-large_8xb32_in1k.py | 158 ++
.../mobilenet-v3-small_8xb16_cifar10.py | 8 +
.../mobilenet-v3-small_8xb32_in1k.py | 158 ++
.../mobilenet_v3_large_imagenet.py | 6 +
.../mobilenet_v3/mobilenet_v3_small_cifar.py | 6 +
.../mobilenet_v3_small_imagenet.py | 6 +
.../configs/mvit/README.md | 44 +
.../configs/mvit/metafile.yml | 95 +
.../configs/mvit/mvitv2-base_8xb256_in1k.py | 29 +
.../configs/mvit/mvitv2-large_8xb256_in1k.py | 29 +
.../configs/mvit/mvitv2-small_8xb256_in1k.py | 29 +
.../configs/mvit/mvitv2-tiny_8xb256_in1k.py | 29 +
.../configs/poolformer/README.md | 38 +
.../configs/poolformer/metafile.yml | 99 +
.../poolformer/poolformer-m36_32xb128_in1k.py | 8 +
.../poolformer/poolformer-m48_32xb128_in1k.py | 8 +
.../poolformer/poolformer-s12_32xb128_in1k.py | 8 +
.../poolformer/poolformer-s24_32xb128_in1k.py | 8 +
.../poolformer/poolformer-s36_32xb128_in1k.py | 8 +
.../configs/regnet/README.md | 51 +
.../configs/regnet/metafile.yml | 122 +
.../regnet/regnetx-1.6gf_8xb128_in1k.py | 6 +
.../configs/regnet/regnetx-12gf_8xb64_in1k.py | 11 +
.../regnet/regnetx-3.2gf_8xb64_in1k.py | 11 +
.../regnet/regnetx-4.0gf_8xb64_in1k.py | 11 +
.../regnet/regnetx-400mf_8xb128_in1k.py | 77 +
.../regnet/regnetx-6.4gf_8xb64_in1k.py | 11 +
.../regnet/regnetx-8.0gf_8xb64_in1k.py | 11 +
.../regnet/regnetx-800mf_8xb128_in1k.py | 6 +
.../configs/repmlp/README.md | 93 +
.../configs/repmlp/metafile.yml | 48 +
.../repmlp/repmlp-base_8xb64_in1k-256px.py | 21 +
.../configs/repmlp/repmlp-base_8xb64_in1k.py | 20 +
.../repmlp/repmlp-base_delopy_8xb64_in1k.py | 3 +
.../repmlp-base_deploy_8xb64_in1k-256px.py | 3 +
.../configs/repvgg/README.md | 101 +
.../repvgg-A0_deploy_4xb64-coslr-120e_in1k.py | 3 +
.../repvgg-A1_deploy_4xb64-coslr-120e_in1k.py | 3 +
.../repvgg-A2_deploy_4xb64-coslr-120e_in1k.py | 3 +
.../repvgg-B0_deploy_4xb64-coslr-120e_in1k.py | 3 +
.../repvgg-B1_deploy_4xb64-coslr-120e_in1k.py | 3 +
...epvgg-B1g2_deploy_4xb64-coslr-120e_in1k.py | 3 +
...epvgg-B1g4_deploy_4xb64-coslr-120e_in1k.py | 3 +
.../repvgg-B2_deploy_4xb64-coslr-120e_in1k.py | 3 +
...4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py | 3 +
...4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py | 3 +
...4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py | 3 +
...4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py | 3 +
.../configs/repvgg/metafile.yml | 208 ++
.../repvgg/repvgg-A0_4xb64-coslr-120e_in1k.py | 8 +
.../repvgg/repvgg-A1_4xb64-coslr-120e_in1k.py | 3 +
.../repvgg/repvgg-A2_4xb64-coslr-120e_in1k.py | 3 +
.../repvgg/repvgg-B0_4xb64-coslr-120e_in1k.py | 3 +
.../repvgg/repvgg-B1_4xb64-coslr-120e_in1k.py | 3 +
.../repvgg-B1g2_4xb64-coslr-120e_in1k.py | 3 +
.../repvgg-B1g4_4xb64-coslr-120e_in1k.py | 3 +
.../repvgg/repvgg-B2_4xb64-coslr-120e_in1k.py | 3 +
...4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py | 3 +
...4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py | 6 +
...4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py | 3 +
...4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py | 3 +
.../configs/res2net/README.md | 37 +
.../configs/res2net/metafile.yml | 70 +
.../res2net/res2net101-w26-s4_8xb32_in1k.py | 5 +
.../res2net/res2net50-w14-s8_8xb32_in1k.py | 5 +
.../res2net/res2net50-w26-s8_8xb32_in1k.py | 5 +
.../configs/resnest/README.md | 26 +
.../configs/resnest/resnest101_32xb64_in1k.py | 181 ++
.../resnest/resnest101_b64x32_imagenet.py | 6 +
.../configs/resnest/resnest200_64xb32_in1k.py | 181 ++
.../resnest/resnest200_b32x64_imagenet.py | 6 +
.../configs/resnest/resnest269_64xb32_in1k.py | 181 ++
.../resnest/resnest269_b32x64_imagenet.py | 6 +
.../configs/resnest/resnest50_32xb64_in1k.py | 181 ++
.../resnest/resnest50_b64x32_imagenet.py | 6 +
.../configs/resnet/README.md | 91 +
.../configs/resnet/metafile.yml | 365 +++
.../resnet/resnet101_8xb16_cifar10.py} | 0
.../configs/resnet/resnet101_8xb32_in1k.py} | 0
.../configs/resnet/resnet101_b16x8_cifar10.py | 6 +
.../resnet/resnet101_b32x8_imagenet.py | 6 +
.../resnet/resnet152_8xb16_cifar10.py} | 0
.../configs/resnet/resnet152_8xb32_in1k.py} | 0
.../configs/resnet/resnet152_b16x8_cifar10.py | 6 +
.../resnet/resnet152_b32x8_imagenet.py | 6 +
.../configs/resnet/resnet18_8xb16_cifar10.py} | 0
.../configs/resnet/resnet18_8xb32_in1k.py} | 0
.../configs/resnet/resnet18_b16x8_cifar10.py | 6 +
.../configs/resnet/resnet18_b32x8_imagenet.py | 6 +
.../configs/resnet/resnet34_8xb16_cifar10.py} | 0
.../configs/resnet/resnet34_8xb32_in1k.py} | 0
.../configs/resnet/resnet34_b16x8_cifar10.py | 6 +
.../configs/resnet/resnet34_b32x8_imagenet.py | 6 +
.../resnet50_32xb64-warmup-coslr_in1k.py} | 0
.../resnet/resnet50_32xb64-warmup-lbs_in1k.py | 12 +
.../resnet/resnet50_32xb64-warmup_in1k.py} | 0
.../resnet/resnet50_8xb128_coslr-90e_in21k.py | 11 +
.../resnet/resnet50_8xb16-mixup_cifar10.py} | 0
.../configs/resnet/resnet50_8xb16_cifar10.py} | 0
.../resnet/resnet50_8xb16_cifar100.py} | 0
.../resnet50_8xb256-rsb-a1-600e_in1k.py | 33 +
.../resnet50_8xb256-rsb-a2-300e_in1k.py | 25 +
.../resnet50_8xb256-rsb-a3-100e_in1k.py | 19 +
.../resnet50_8xb32-coslr-preciseBN_in1k.py | 12 +
.../resnet/resnet50_8xb32-coslr_in1k.py} | 0
.../resnet/resnet50_8xb32-cutmix_in1k.py} | 0
.../resnet50_8xb32-fp16-dynamic_in1k.py | 4 +
.../resnet/resnet50_8xb32-fp16_in1k.py | 4 +
.../resnet/resnet50_8xb32-lbs_in1k.py} | 0
.../resnet/resnet50_8xb32-mixup_in1k.py} | 0
.../configs/resnet/resnet50_8xb32_in1k.py | 6 +
.../configs/resnet/resnet50_8xb8_cars.py | 19 +
.../configs/resnet/resnet50_8xb8_cub.py | 19 +
.../configs/resnet/resnet50_b16x8_cifar10.py | 6 +
.../configs/resnet/resnet50_b16x8_cifar100.py | 6 +
.../resnet/resnet50_b16x8_cifar10_mixup.py | 6 +
.../resnet/resnet50_b32x8_coslr_imagenet.py | 6 +
.../resnet/resnet50_b32x8_cutmix_imagenet.py | 6 +
.../configs/resnet/resnet50_b32x8_imagenet.py | 6 +
.../resnet50_b32x8_label_smooth_imagenet.py | 6 +
.../resnet/resnet50_b32x8_mixup_imagenet.py | 6 +
.../resnet50_b64x32_warmup_coslr_imagenet.py | 6 +
.../resnet/resnet50_b64x32_warmup_imagenet.py | 6 +
...t50_b64x32_warmup_label_smooth_imagenet.py | 6 +
.../configs/resnet/resnetv1c101_8xb32_in1k.py | 7 +
.../configs/resnet/resnetv1c152_8xb32_in1k.py | 7 +
.../configs/resnet/resnetv1c50_8xb32_in1k.py | 5 +
.../resnet/resnetv1d101_8xb32_in1k.py} | 0
.../resnet/resnetv1d101_b32x8_imagenet.py | 6 +
.../resnet/resnetv1d152_8xb32_in1k.py} | 0
.../resnet/resnetv1d152_b32x8_imagenet.py | 6 +
.../configs/resnet/resnetv1d50_8xb32_in1k.py} | 0
.../resnet/resnetv1d50_b32x8_imagenet.py | 6 +
.../configs/resnext/README.md | 36 +
.../configs/resnext/metafile.yml | 73 +
.../resnext/resnext101-32x4d_8xb32_in1k.py} | 0
.../resnext/resnext101-32x8d_8xb32_in1k.py} | 0
.../resnext101_32x4d_b32x8_imagenet.py | 6 +
.../resnext101_32x8d_b32x8_imagenet.py | 6 +
.../resnext/resnext152-32x4d_8xb32_in1k.py} | 0
.../resnext152_32x4d_b32x8_imagenet.py | 6 +
.../resnext/resnext50-32x4d_8xb32_in1k.py} | 0
.../resnext/resnext50_32x4d_b32x8_imagenet.py | 6 +
.../configs/seresnet/README.md | 34 +
.../configs/seresnet/metafile.yml | 47 +
.../seresnet/seresnet101_8xb32_in1k.py} | 0
.../seresnet/seresnet101_b32x8_imagenet.py | 6 +
.../seresnet/seresnet50_8xb32_in1k.py} | 0
.../seresnet/seresnet50_b32x8_imagenet.py | 6 +
.../seresnext101-32x4d_8xb32_in1k.py} | 0
.../seresnext101_32x4d_b32x8_imagenet.py | 6 +
.../seresnet/seresnext50-32x4d_8xb32_in1k.py} | 0
.../seresnext50_32x4d_b32x8_imagenet.py | 6 +
.../configs/shufflenet_v1/README.md | 33 +
.../configs/shufflenet_v1/metafile.yml | 35 +
.../shufflenet-v1-1x_16xb64_in1k.py} | 0
..._v1_1x_b64x16_linearlr_bn_nowd_imagenet.py | 6 +
.../configs/shufflenet_v2/README.md | 33 +
.../configs/shufflenet_v2/metafile.yml | 35 +
.../shufflenet-v2-1x_16xb64_in1k.py | 8 +
..._v2_1x_b64x16_linearlr_bn_nowd_imagenet.py | 6 +
.../configs/swin_transformer/README.md | 60 +
.../configs/swin_transformer/metafile.yml | 201 ++
.../swin-base_16xb64_in1k-384px.py | 7 +
.../swin_transformer/swin-base_16xb64_in1k.py | 6 +
.../swin-large_16xb64_in1k-384px.py | 7 +
.../swin-large_16xb64_in1k.py | 7 +
.../swin-large_8xb8_cub_384px.py | 37 +
.../swin-small_16xb64_in1k.py | 6 +
.../swin_transformer/swin-tiny_16xb64_in1k.py | 6 +
.../swin_base_224_b16x64_300e_imagenet.py | 6 +
.../swin_base_384_evalonly_imagenet.py | 6 +
.../swin_large_224_evalonly_imagenet.py | 6 +
.../swin_large_384_evalonly_imagenet.py | 6 +
.../swin_small_224_b16x64_300e_imagenet.py | 6 +
.../swin_tiny_224_b16x64_300e_imagenet.py | 6 +
.../configs/swin_transformer_v2/README.md | 58 +
.../configs/swin_transformer_v2/metafile.yml | 204 ++
.../swinv2-base-w16_16xb64_in1k-256px.py | 8 +
...v2-base-w16_in21k-pre_16xb64_in1k-256px.py | 13 +
...v2-base-w24_in21k-pre_16xb64_in1k-384px.py | 14 +
.../swinv2-base-w8_16xb64_in1k-256px.py | 6 +
...2-large-w16_in21k-pre_16xb64_in1k-256px.py | 13 +
...2-large-w24_in21k-pre_16xb64_in1k-384px.py | 15 +
.../swinv2-small-w16_16xb64_in1k-256px.py | 8 +
.../swinv2-small-w8_16xb64_in1k-256px.py | 6 +
.../swinv2-tiny-w16_16xb64_in1k-256px.py | 8 +
.../swinv2-tiny-w8_16xb64_in1k-256px.py | 6 +
.../configs/t2t_vit/README.md | 36 +
.../configs/t2t_vit/metafile.yml | 58 +
.../t2t_vit/t2t-vit-t-14_8xb64_in1k.py | 35 +
.../t2t_vit/t2t-vit-t-19_8xb64_in1k.py | 35 +
.../t2t_vit/t2t-vit-t-24_8xb64_in1k.py | 35 +
.../configs/tnt/README.md | 36 +
.../configs/tnt/metafile.yml | 29 +
.../configs/tnt/tnt-s-p16_16xb64_in1k.py | 39 +
.../tnt_s_patch16_224_evalonly_imagenet.py | 6 +
.../configs/twins/README.md | 39 +
.../configs/twins/metafile.yml | 114 +
.../twins/twins-pcpvt-base_8xb128_in1k.py | 33 +
.../twins/twins-pcpvt-large_16xb64_in1k.py | 5 +
.../twins/twins-pcpvt-small_8xb128_in1k.py | 3 +
.../twins/twins-svt-base_8xb128_in1k.py | 33 +
.../twins/twins-svt-large_16xb64_in1k.py | 5 +
.../twins/twins-svt-small_8xb128_in1k.py | 3 +
.../configs/van/README.md | 50 +
.../configs/van/metafile.yml | 84 +
.../configs/van/van-b0_8xb128_in1k.py | 61 +
.../configs/van/van-b1_8xb128_in1k.py | 61 +
.../configs/van/van-b2_8xb128_in1k.py | 61 +
.../configs/van/van-b3_8xb128_in1k.py | 61 +
.../configs/van/van-b4_8xb128_in1k.py | 61 +
.../configs/van/van-base_8xb128_in1k.py | 6 +
.../configs/van/van-large_8xb128_in1k.py | 6 +
.../configs/van/van-small_8xb128_in1k.py | 6 +
.../configs/van/van-tiny_8xb128_in1k.py | 6 +
.../configs/vgg/README.md | 39 +
.../configs/vgg/metafile.yml | 125 +
.../configs/vgg/vgg11_8xb32_in1k.py} | 0
.../configs/vgg/vgg11_b32x8_imagenet.py | 6 +
.../configs/vgg/vgg11bn_8xb32_in1k.py} | 0
.../configs/vgg/vgg11bn_b32x8_imagenet.py | 6 +
.../configs/vgg/vgg13_8xb32_in1k.py} | 0
.../configs/vgg/vgg13_b32x8_imagenet.py | 6 +
.../configs/vgg/vgg13bn_8xb32_in1k.py} | 0
.../configs/vgg/vgg13bn_b32x8_imagenet.py | 6 +
.../configs/vgg/vgg16_8xb16_voc.py} | 0
.../configs/vgg/vgg16_8xb32_in1k.py | 7 +
.../configs/vgg/vgg16_b16x8_voc.py | 6 +
.../configs/vgg/vgg16_b32x8_imagenet.py | 6 +
.../configs/vgg/vgg16bn_8xb32_in1k.py} | 0
.../configs/vgg/vgg16bn_b32x8_imagenet.py | 6 +
.../configs/vgg/vgg19_8xb32_in1k.py} | 0
.../configs/vgg/vgg19_b32x8_imagenet.py | 6 +
.../configs/vgg/vgg19bn_8xb32_in1k.py} | 0
.../configs/vgg/vgg19bn_b32x8_imagenet.py | 6 +
.../configs/vision_transformer/README.md | 57 +
.../configs/vision_transformer/metafile.yml | 79 +
.../vit-base-p16_ft-4xb544-ipu_in1k.py | 115 +
.../vit-base-p16_ft-64xb64_in1k-384.py | 36 +
.../vit-base-p16_pt-64xb64_in1k-224.py | 12 +
.../vit-base-p32_ft-64xb64_in1k-384.py | 36 +
.../vit-base-p32_pt-64xb64_in1k-224.py | 12 +
.../vit-large-p16_ft-64xb64_in1k-384.py | 36 +
.../vit-large-p16_pt-64xb64_in1k-224.py | 12 +
.../vit-large-p32_ft-64xb64_in1k-384.py | 37 +
.../vit-large-p32_pt-64xb64_in1k-224.py | 12 +
.../configs/wrn/README.md | 35 +
.../configs/wrn/metafile.yml | 77 +
.../configs/wrn/wide-resnet101_8xb32_in1k.py | 7 +
.../configs/wrn/wide-resnet50_8xb32_in1k.py | 5 +
.../wrn/wide-resnet50_timm_8xb32_in1k.py | 5 +
.../mmclassification-0.24.1/demo/bird.JPEG | Bin 0 -> 74237 bytes
.../mmclassification-0.24.1/demo/cat-dog.png | Bin 0 -> 744894 bytes
.../demo/demo.JPEG | Bin
.../mmclassification-0.24.1/demo/dog.jpg | Bin 0 -> 26160 bytes
.../demo/image_demo.py | 33 +
.../demo/ipu_train_example.sh | 9 +
.../mmclassification-0.24.1/docker/Dockerfile | 23 +
.../docker/serve/Dockerfile | 49 +
.../docker/serve/config.properties | 0
.../docker/serve/entrypoint.sh | 0
.../docs/en}/Makefile | 0
.../docs/en/_static/css/readthedocs.css | 27 +
.../docs/en/_static/image}/mmcls-logo.png | Bin
.../image/tools/analysis/analyze_log.jpg | Bin 0 -> 68146 bytes
.../tools/visualization/lr_schedule1.png | Bin 0 -> 30065 bytes
.../tools/visualization/lr_schedule2.png | Bin 0 -> 48176 bytes
.../docs/en/_static/js/custom.js | 1 +
.../docs/en/_templates/classtemplate.rst | 14 +
.../docs/en/api/apis.rst | 45 +
.../docs/en/api/core.rst | 62 +
.../docs/en/api/datasets.rst | 61 +
.../docs/en/api/models.rst | 141 +
.../docs/en/api/models.utils.augment.rst | 35 +
.../docs/en/api/models.utils.rst | 50 +
.../docs/en/api/transforms.rst | 171 ++
.../docs/en/api/utils.rst | 23 +
.../docs/en/changelog.md | 718 +++++
.../docs/en/compatibility.md | 8 +
.../mmclassification-0.24.1/docs/en/conf.py | 238 ++
.../docs/en/device/npu.md | 34 +
.../docs/en/docutils.conf | 2 +
.../mmclassification-0.24.1/docs/en/faq.md | 83 +
.../docs/en/getting_started.md | 275 ++
.../mmclassification-0.24.1/docs/en/index.rst | 99 +
.../docs/en/install.md | 219 ++
.../docs/en/model_zoo.md | 162 ++
.../mmclassification-0.24.1/docs/en/stat.py | 100 +
.../docs/en/tools/analysis.md | 211 ++
.../docs/en/tools/miscellaneous.md | 59 +
.../docs/en/tools/model_serving.md | 87 +
.../docs/en/tools/onnx2tensorrt.md | 80 +
.../docs/en/tools/pytorch2onnx.md | 204 ++
.../docs/en/tools/pytorch2torchscript.md | 56 +
.../docs/en/tools/visualization.md | 302 +++
.../tutorials/MMClassification_python.ipynb | 2040 ++++++++++++++
.../en/tutorials/MMClassification_tools.ipynb | 1249 +++++++++
.../docs/en/tutorials/config.md | 417 +++
.../docs/en/tutorials/data_pipeline.md | 150 ++
.../docs/en/tutorials/finetune.md | 236 ++
.../docs/en/tutorials/new_dataset.md | 239 ++
.../docs/en/tutorials/new_modules.md | 272 ++
.../docs/en/tutorials/runtime.md | 257 ++
.../docs/en/tutorials/schedule.md | 341 +++
.../docs/zh_CN}/Makefile | 0
.../docs/zh_CN/_static/css/readthedocs.css | 27 +
.../docs/zh_CN/_static/image/mmcls-logo.png | Bin 0 -> 33009 bytes
.../image/tools/analysis/analyze_log.jpg | Bin 0 -> 68146 bytes
.../tools/visualization/lr_schedule1.png | Bin 0 -> 30065 bytes
.../tools/visualization/lr_schedule2.png | Bin 0 -> 48176 bytes
.../docs/zh_CN/_static/js/custom.js | 1 +
.../docs/zh_CN/community/CONTRIBUTING.md | 62 +
.../docs/zh_CN/compatibility.md | 7 +
.../docs/zh_CN/conf.py | 226 ++
.../docs/zh_CN/device/npu.md | 34 +
.../docs/zh_CN/docutils.conf | 2 +
.../mmclassification-0.24.1/docs/zh_CN/faq.md | 73 +
.../docs/zh_CN/getting_started.md | 266 ++
.../docs/zh_CN/imgs/qq_group_qrcode.jpg | Bin 0 -> 71955 bytes
.../docs/zh_CN}/imgs/zhihu_qrcode.jpg | Bin
.../docs/zh_CN/index.rst | 99 +
.../docs/zh_CN/install.md | 210 ++
.../docs/zh_CN/stat.py | 99 +
.../docs/zh_CN/tools/analysis.md | 211 ++
.../docs/zh_CN/tools/miscellaneous.md | 59 +
.../docs/zh_CN/tools/model_serving.md | 87 +
.../docs/zh_CN/tools/onnx2tensorrt.md | 75 +
.../docs/zh_CN/tools/pytorch2onnx.md | 88 +
.../docs/zh_CN/tools/pytorch2torchscript.md | 54 +
.../docs/zh_CN/tools/visualization.md | 302 +++
.../MMClassification_python_cn.ipynb | 2041 ++++++++++++++
.../tutorials/MMClassification_tools_cn.ipynb | 1247 +++++++++
.../docs/zh_CN/tutorials/config.md | 417 +++
.../docs/zh_CN/tutorials/data_pipeline.md | 148 ++
.../docs/zh_CN/tutorials/finetune.md | 222 ++
.../docs/zh_CN/tutorials/new_dataset.md | 230 ++
.../docs/zh_CN/tutorials/new_modules.md | 280 ++
.../docs/zh_CN/tutorials/runtime.md | 260 ++
.../docs/zh_CN/tutorials/schedule.md | 333 +++
.../mmclassification-0.24.1/hostfile | 2 +
.../mmclassification-0.24.1/mmcls/__init__.py | 60 +
.../mmcls/apis/__init__.py | 10 +
.../mmcls/apis/inference.py | 30 +-
.../mmcls/apis/test.py | 230 ++
.../mmcls/apis/test_old.py | 228 ++
.../mmcls/apis/test_time.py | 257 ++
.../mmcls/apis/train.py | 232 ++
.../mmcls/core/__init__.py | 5 +
.../mmcls/core/evaluation/__init__.py | 12 +
.../mmcls/core/evaluation/eval_hooks.py | 78 +
.../mmcls/core/evaluation/eval_metrics.py | 259 ++
.../mmcls/core/evaluation/mean_ap.py | 7 +-
.../evaluation/multilabel_eval_metrics.py | 1 +
.../mmcls/core/export/__init__.py | 4 +
.../mmcls/core/export/test.py | 96 +
.../mmcls/core/hook/__init__.py | 10 +
.../mmcls/core/hook/class_num_check_hook.py | 73 +
.../mmcls/core/hook/lr_updater.py | 83 +
.../mmcls/core/hook/precise_bn_hook.py | 180 ++
.../mmcls/core/hook/wandblogger_hook.py | 340 +++
.../mmcls/core/optimizers/__init__.py | 6 +
.../mmcls/core/optimizers/lamb.py | 227 ++
.../mmcls/core/utils/__init__.py | 7 +
.../mmcls/core/utils/dist_utils.py | 98 +
.../mmcls/core/utils/misc.py | 1 +
.../mmcls/core/visualization/__init__.py | 8 +
.../mmcls/core/visualization/image.py | 343 +++
.../mmcls/datasets/__init__.py | 25 +
.../mmcls/datasets/base_dataset.py | 49 +-
.../mmcls/datasets/builder.py | 183 ++
.../mmcls/datasets/cifar.py | 27 +-
.../mmcls/datasets/cub.py | 129 +
.../mmcls/datasets/custom.py | 229 ++
.../mmcls/datasets/dataset_wrappers.py | 329 +++
.../mmcls/datasets/imagenet.py | 136 +-
.../mmcls/datasets/imagenet21k.py | 174 ++
.../mmcls/datasets/mnist.py | 5 +-
.../mmcls/datasets/multi_label.py | 23 +-
.../mmcls/datasets/pipelines/__init__.py | 22 +
.../mmcls/datasets/pipelines/auto_augment.py | 128 +-
.../mmcls/datasets/pipelines/compose.py | 1 +
.../mmcls/datasets/pipelines/formatting.py | 195 ++
.../mmcls/datasets/pipelines/loading.py | 1 +
.../mmcls/datasets/pipelines/transforms.py | 226 +-
.../mmcls/datasets/samplers/__init__.py | 5 +
.../datasets/samplers/distributed_sampler.py | 61 +
.../mmcls/datasets/samplers/repeat_aug.py | 106 +
.../mmcls/datasets/stanford_cars.py | 210 ++
.../mmcls/datasets/utils.py | 153 ++
.../mmcls/datasets/voc.py | 94 +
.../mmcls/models/__init__.py | 14 +
.../mmcls/models/backbones/__init__.py | 51 +
.../mmcls/models/backbones/alexnet.py | 3 +-
.../mmcls/models/backbones/base_backbone.py | 1 +
.../mmcls/models/backbones/conformer.py | 626 +++++
.../mmcls/models/backbones/convmixer.py | 176 ++
.../mmcls/models/backbones/convnext.py | 333 +++
.../mmcls/models/backbones/cspnet.py | 679 +++++
.../mmcls/models/backbones/deit.py | 117 +
.../mmcls/models/backbones/densenet.py | 332 +++
.../mmcls/models/backbones/efficientformer.py | 606 +++++
.../mmcls/models/backbones/efficientnet.py | 407 +++
.../mmcls/models/backbones/hornet.py | 499 ++++
.../mmcls/models/backbones/hrnet.py | 563 ++++
.../mmcls/models/backbones/lenet.py | 3 +-
.../mmcls/models/backbones/mlp_mixer.py | 263 ++
.../mmcls/models/backbones/mobilenet_v2.py | 32 +-
.../mmcls/models/backbones/mobilenet_v3.py | 195 ++
.../mmcls/models/backbones/mvit.py | 700 +++++
.../mmcls/models/backbones/poolformer.py | 416 +++
.../mmcls/models/backbones/regnet.py | 100 +-
.../mmcls/models/backbones/repmlp.py | 578 ++++
.../mmcls/models/backbones/repvgg.py | 619 +++++
.../mmcls/models/backbones/res2net.py | 306 +++
.../mmcls/models/backbones/resnest.py | 3 +-
.../mmcls/models/backbones/resnet.py | 84 +-
.../mmcls/models/backbones/resnet_cifar.py | 6 +-
.../mmcls/models/backbones/resnext.py | 3 +-
.../mmcls/models/backbones/seresnet.py | 3 +-
.../mmcls/models/backbones/seresnext.py | 3 +-
.../mmcls/models/backbones/shufflenet_v1.py | 22 +-
.../mmcls/models/backbones/shufflenet_v2.py | 31 +-
.../models/backbones/swin_transformer.py | 548 ++++
.../models/backbones/swin_transformer_v2.py | 560 ++++
.../mmcls/models/backbones/t2t_vit.py | 440 +++
.../mmcls/models/backbones/timm_backbone.py | 112 +
.../mmcls/models/backbones/tnt.py | 368 +++
.../mmcls/models/backbones/twins.py | 723 +++++
.../mmcls/models/backbones/van.py | 445 ++++
.../mmcls/models/backbones/vgg.py | 19 +-
.../models/backbones/vision_transformer.py | 383 +++
.../mmcls/models/builder.py | 38 +
.../mmcls/models/classifiers/__init__.py | 5 +
.../mmcls/models/classifiers/base.py | 224 ++
.../mmcls/models/classifiers/image.py | 160 ++
.../mmcls/models/heads/__init__.py | 17 +
.../mmcls/models/heads/base_head.py | 1 +
.../mmcls/models/heads/cls_head.py | 116 +
.../mmcls/models/heads/conformer_head.py | 132 +
.../mmcls/models/heads/deit_head.py | 96 +
.../models/heads/efficientformer_head.py | 96 +
.../mmcls/models/heads/linear_head.py | 81 +
.../models/heads/multi_label_csra_head.py | 121 +
.../mmcls/models/heads/multi_label_head.py | 99 +
.../models/heads/multi_label_linear_head.py | 85 +
.../mmcls/models/heads/stacked_head.py | 163 ++
.../models/heads/vision_transformer_head.py | 123 +
.../mmcls/models/losses/__init__.py | 17 +
.../mmcls/models/losses/accuracy.py | 143 +
.../mmcls/models/losses/asymmetric_loss.py | 149 ++
.../mmcls/models/losses/cross_entropy_loss.py | 209 ++
.../mmcls/models/losses/focal_loss.py | 23 +-
.../mmcls/models/losses/label_smooth_loss.py | 45 +-
.../mmcls/models/losses/seesaw_loss.py | 173 ++
.../mmcls/models/losses/utils.py | 119 +
.../mmcls/models/necks/__init__.py | 6 +
.../mmcls/models/necks/gap.py | 1 +
.../mmcls/models/necks/gem.py | 53 +
.../mmcls/models/necks/hr_fuse.py | 83 +
.../mmcls/models/utils/__init__.py | 20 +
.../mmcls/models/utils/attention.py | 564 ++++
.../mmcls/models/utils/augment/__init__.py | 9 +
.../mmcls/models/utils/augment/augments.py | 2 +
.../mmcls/models/utils/augment/builder.py | 8 +
.../mmcls/models/utils/augment/cutmix.py | 175 ++
.../mmcls/models/utils/augment/identity.py | 6 +-
.../mmcls/models/utils/augment/mixup.py | 80 +
.../mmcls/models/utils/augment/resizemix.py | 93 +
.../mmcls/models/utils/augment/utils.py | 24 +
.../mmcls/models/utils/channel_shuffle.py | 1 +
.../mmcls/models/utils/embed.py | 420 +++
.../mmcls/models/utils/helpers.py | 53 +
.../mmcls/models/utils/inverted_residual.py | 125 +
.../mmcls/models/utils/layer_scale.py | 35 +
.../mmcls/models/utils/make_divisible.py | 1 +
.../mmcls/models/utils/position_encoding.py | 41 +
.../mmcls/models/utils/se_layer.py | 80 +
.../mmcls/utils/__init__.py | 12 +
.../mmcls/utils/collect_env.py | 1 +
.../mmcls/utils/device.py | 15 +
.../mmcls/utils/distribution.py | 68 +
.../mmcls/utils/logger.py | 56 +
.../mmcls/utils/setup_env.py | 47 +
.../mmcls/version.py | 4 +-
.../mmclassification-0.24.1/model-index.yml | 34 +
.../mmclassification-0.24.1/mult_test.sh | 7 +
.../requirements.txt | 0
.../requirements/docs.txt | 6 +
.../requirements/mminstall.txt | 1 +
.../requirements/optional.txt | 5 +
.../requirements/readthedocs.txt | 3 +
.../requirements/runtime.txt | 3 +
.../requirements/tests.txt | 1 +
.../resources/mmcls-logo.png | Bin 0 -> 33009 bytes
.../mmclassification-0.24.1/setup.cfg | 23 +
.../mmclassification-0.24.1/setup.py | 194 ++
.../sing_test.sh | 2 +-
.../mmclassification-0.24.1/single_process.sh | 28 +
.../tests/data/color.jpg | Bin
.../tests/data/dataset/a/1.JPG | 0
.../tests/data/dataset/ann.txt | 3 +
.../tests/data/dataset/b/2.jpeg | 0
.../tests/data/dataset/b/subb/3.jpg | 0
.../tests/data/dataset/classes.txt | 2 +
.../tests/data/gray.jpg | Bin
.../tests/data/retinanet.py | 83 +
.../tests/data/test.logjson | 10 +
.../tests/test_data/test_builder.py | 272 ++
.../test_data/test_datasets/test_common.py | 911 +++++++
.../test_datasets/test_dataset_utils.py | 22 +
.../test_datasets/test_dataset_wrapper.py | 192 ++
.../test_data/test_datasets/test_sampler.py | 53 +
.../test_pipelines/test_auto_augment.py | 103 +-
.../test_data}/test_pipelines/test_loading.py | 3 +-
.../test_pipelines/test_transform.py | 142 +-
.../test_downstream/test_mmdet_inference.py | 118 +
.../tests/test_metrics/test_losses.py | 362 +++
.../tests/test_metrics/test_metrics.py | 93 +
.../tests/test_metrics/test_utils.py | 49 +
.../test_models/test_backbones/__init__.py | 1 +
.../test_backbones/test_conformer.py | 111 +
.../test_backbones/test_convmixer.py | 84 +
.../test_backbones/test_convnext.py | 86 +
.../test_models/test_backbones/test_cspnet.py | 147 +
.../test_models/test_backbones/test_deit.py | 131 +
.../test_backbones/test_densenet.py | 95 +
.../test_backbones/test_efficientformer.py | 199 ++
.../test_backbones/test_efficientnet.py | 144 +
.../test_models/test_backbones/test_hornet.py | 174 ++
.../test_models/test_backbones/test_hrnet.py | 93 +
.../test_backbones/test_mlp_mixer.py | 119 +
.../test_backbones/test_mobilenet_v2.py | 7 +-
.../test_backbones/test_mobilenet_v3.py | 175 ++
.../test_models/test_backbones/test_mvit.py | 185 ++
.../test_backbones/test_poolformer.py | 143 +
.../test_backbones/test_regnet.py | 11 +-
.../test_models/test_backbones/test_repmlp.py | 172 ++
.../test_models/test_backbones/test_repvgg.py | 350 +++
.../test_backbones/test_res2net.py | 71 +
.../test_backbones/test_resnest.py | 1 +
.../test_backbones/test_resnet.py | 58 +-
.../test_backbones/test_resnet_cifar.py | 1 +
.../test_backbones/test_resnext.py | 4 +-
.../test_backbones/test_seresnet.py | 4 +-
.../test_backbones/test_seresnext.py | 4 +-
.../test_backbones/test_shufflenet_v1.py | 6 +-
.../test_backbones/test_shufflenet_v2.py | 6 +-
.../test_backbones/test_swin_transformer.py | 255 ++
.../test_swin_transformer_v2.py | 243 ++
.../test_backbones/test_t2t_vit.py | 188 ++
.../test_backbones/test_timm_backbone.py | 204 ++
.../test_models/test_backbones/test_tnt.py | 50 +
.../test_models/test_backbones/test_twins.py | 243 ++
.../test_models/test_backbones/test_van.py | 188 ++
.../test_models}/test_backbones/test_vgg.py | 7 +-
.../test_backbones/test_vision_transformer.py | 183 ++
.../tests/test_models/test_backbones/utils.py | 31 +
.../tests/test_models/test_classifiers.py | 326 +++
.../tests/test_models/test_heads.py | 400 +++
.../tests/test_models/test_neck.py | 87 +
.../test_models/test_utils/test_attention.py | 208 ++
.../test_models/test_utils/test_augment.py | 96 +
.../test_models/test_utils/test_embed.py | 88 +
.../test_utils/test_inverted_residual.py | 82 +
.../test_utils/test_layer_scale.py | 48 +
.../tests/test_models/test_utils/test_misc.py | 59 +
.../test_utils/test_position_encoding.py | 10 +
.../tests/test_models/test_utils/test_se.py | 95 +
.../tests/test_runtime}/test_eval_hook.py | 30 +-
.../tests/test_runtime/test_hooks.py | 158 ++
.../tests/test_runtime/test_num_class_hook.py | 84 +
.../tests/test_runtime/test_optimizer.py | 309 +++
.../tests/test_runtime/test_preciseBN_hook.py | 274 ++
.../tests/test_utils/test_device.py | 28 +
.../tests/test_utils/test_logger.py | 55 +
.../tests/test_utils/test_setup_env.py | 68 +
.../tests/test_utils/test_version_utils.py | 21 +
.../tests/test_utils/test_visualization.py | 100 +
.../tools/analysis_tools/analyze_logs.py | 215 ++
.../tools/analysis_tools}/analyze_results.py | 28 +-
.../tools/analysis_tools/eval_metric.py | 71 +
.../tools/analysis_tools}/get_flops.py | 5 +-
.../convert_models/efficientnet_to_mmcls.py | 215 ++
.../tools/convert_models/hornet2mmcls.py | 61 +
.../tools/convert_models/mlpmixer_to_mmcls.py | 58 +
.../convert_models/mobilenetv2_to_mmcls.py | 1 +
.../tools/convert_models/publish_model.py | 55 +
.../convert_models/reparameterize_model.py | 55 +
.../convert_models/reparameterize_repvgg.py | 60 +
.../tools/convert_models/repvgg_to_mmcls.py | 60 +
.../convert_models/shufflenetv2_to_mmcls.py | 1 +
.../convert_models/torchvision_to_mmcls.py | 63 +
.../tools/convert_models/twins2mmcls.py | 73 +
.../tools/convert_models/van2mmcls.py | 65 +
.../tools/convert_models/vgg_to_mmcls.py | 3 +-
.../tools/deployment/mmcls2torchserve.py | 1 +
.../tools/deployment/mmcls_handler.py | 1 +
.../tools/deployment/onnx2tensorrt.py | 20 +-
.../tools/deployment/pytorch2mlmodel.py | 160 ++
.../tools/deployment/pytorch2onnx.py | 54 +-
.../tools/deployment/pytorch2torchscript.py | 3 +-
.../tools/deployment/test.py | 128 +
.../tools/deployment/test_torchserver.py | 45 +
.../tools/dist_test.sh | 22 +
.../tools/dist_train.sh | 21 +
.../tools/kfold-cross-valid.py | 371 +++
.../tools/misc/print_config.py | 35 +
.../tools/misc/verify_dataset.py | 131 +
.../tools/slurm_test.sh | 0
.../tools/slurm_train.sh | 0
.../mmclassification-0.24.1/tools/test.py | 254 ++
.../mmclassification-0.24.1/tools/train.py | 215 ++
.../tools/visualizations/vis_cam.py | 356 +++
.../tools/visualizations/vis_lr.py | 334 +++
.../tools/visualizations/vis_pipeline.py | 337 +++
.../mmclassification-0.24.1/train.md | 101 +
.../.github/CONTRIBUTING.md | 69 -
.../.github/workflows/build.yml | 98 -
.../.github/workflows/deploy.yml | 22 -
.../.gitignore | 117 -
.../.idea/.gitignore | 3 -
.../.idea/.name | 1 -
.../inspectionProfiles/profiles_settings.xml | 6 -
.../mmclassification-speed-benchmark.iml | 15 -
.../.idea/modules.xml | 8 -
.../.idea/vcs.xml | 6 -
.../.pre-commit-config.yaml | 50 -
.../.readthedocs.yml | 7 -
.../MANIFEST.in | 3 -
.../README.md | 98 -
.../README_zh-CN.md | 101 -
.../configs/_base_/datasets/imagenet_bs32.py | 40 -
.../configs/_base_/models/AlexNet_1x.py | 5 -
.../configs/_base_/models/resnest101.py | 18 -
.../configs/_base_/models/resnest200.py | 18 -
.../configs/_base_/models/resnest269.py | 18 -
.../configs/_base_/models/resnest50.py | 17 -
.../models/vit_base_patch16_224_finetune.py | 21 -
.../models/vit_base_patch16_224_pretrain.py | 26 -
.../models/vit_base_patch16_384_finetune.py | 21 -
.../models/vit_base_patch32_384_finetune.py | 21 -
.../models/vit_large_patch16_224_finetune.py | 21 -
.../models/vit_large_patch16_384_finetune.py | 21 -
.../models/vit_large_patch32_384_finetune.py | 21 -
.../_base_/schedules/imagenet_bs4096_AdamW.py | 18 -
.../configs/fp16/README.md | 20 -
.../configs/fp16/metafile.yml | 30 -
.../fp16/resnet152_b32x8_fp16_imagenet.py | 6 -
.../fp16/resnet18_b32x8_fp16_imagenet.py | 6 -
.../fp16/resnet34_b32x8_fp16_imagenet.py | 6 -
.../resnet50_b32x8_fp16_dynamic_imagenet.py | 4 -
.../fp16/resnet50_b32x8_fp16_imagenet.py | 10 -
.../resnext50_32x4d_b32x8_fp16_imagenet.py | 6 -
.../fp16/seresnet50_b32x8_fp16_imagenet.py | 7 -
...x_b64x16_linearlr_bn_nowd_fp16_imagenet.py | 7 -
...x_b64x16_linearlr_bn_nowd_fp16_imagenet.py | 7 -
.../configs/fp16/vgg11_b32x8_fp16_imagenet.py | 6 -
.../configs/lenet/README.md | 18 -
.../configs/mobilenet_v2/README.md | 26 -
.../configs/mobilenet_v2/metafile.yml | 29 -
.../mobilenet_v2_b32x8_imagenet.py | 6 -
.../configs/regnet/README.md | 37 -
.../regnet/regnetx_1.6gf_b32x8_imagenet.py | 51 -
.../regnet/regnetx_12gf_b32x8_imagenet.py | 51 -
.../regnet/regnetx_3.2gf_b32x8_imagenet.py | 51 -
.../regnet/regnetx_4.0gf_b32x8_imagenet.py | 51 -
.../regnet/regnetx_400mf_b32x8_imagenet.py | 51 -
.../regnet/regnetx_6.4gf_b32x8_imagenet.py | 51 -
.../regnet/regnetx_8.0gf_b32x8_imagenet.py | 51 -
.../regnet/regnetx_800mf_b32x8_imagenet.py | 51 -
.../configs/resnet/README.md | 46 -
.../configs/resnet/metafile.yml | 217 --
.../configs/resnet/resnet50_b32x8_imagenet.py | 4 -
...t50_b64x32_warmup_label_smooth_imagenet.py | 12 -
.../configs/resnext/README.md | 26 -
.../configs/resnext/metafile.yml | 68 -
.../configs/seresnet/README.md | 24 -
.../configs/seresnet/metafile.yml | 42 -
.../configs/seresnext/README.md | 15 -
.../configs/shufflenet_v1/README.md | 23 -
.../configs/shufflenet_v1/metafile.yml | 30 -
.../configs/shufflenet_v2/README.md | 23 -
.../configs/shufflenet_v2/metafile.yml | 30 -
..._v2_1x_b64x16_linearlr_bn_nowd_imagenet.py | 6 -
.../configs/speed_test/AlexNet.py | 5 -
.../speed_test/datasets/imagenet_bs32.py | 35 -
.../speed_test/datasets/imagenet_bs64.py | 35 -
.../mobilenet_v2_b32x8_fp16_imagenet.py | 7 -
.../speed_test/mobilenet_v2_b32x8_imagenet.py | 5 -
.../resnet152_b32x8_fp16_imagenet.py | 6 -
.../speed_test/resnet152_b32x8_imagenet.py | 4 -
.../resnet18_b32x8_fp16_imagenet.py | 6 -
.../speed_test/resnet18_b32x8_imagenet.py | 4 -
.../resnet34_b32x8_fp16_imagenet.py | 6 -
.../speed_test/resnet34_b32x8_imagenet.py | 4 -
.../resnet50_b32x8_fp16_imagenet.py | 10 -
.../speed_test/resnet50_b32x8_imagenet.py | 4 -
.../resnext50_32x4d_b32x8_fp16_imagenet.py | 6 -
.../resnext50_32x4d_b32x8_imagenet.py | 4 -
.../seresnet50_b32x8_fp16_imagenet.py | 7 -
.../speed_test/seresnet50_b32x8_imagenet.py | 5 -
...x_b64x16_linearlr_bn_nowd_fp16_imagenet.py | 7 -
..._v1_1x_b64x16_linearlr_bn_nowd_imagenet.py | 5 -
...x_b64x16_linearlr_bn_nowd_fp16_imagenet.py | 7 -
..._v2_1x_b64x16_linearlr_bn_nowd_imagenet.py | 5 -
.../speed_test/vgg11_b32x8_fp16_imagenet.py | 8 -
.../speed_test/vgg11_b32x8_imagenet.py | 7 -
.../configs/vgg/README.md | 30 -
.../configs/vgg/metafile.yml | 120 -
.../configs/vgg/vgg16_b32x8_imagenet.py | 6 -
.../vit_base_patch16_224_finetune_imagenet.py | 10 -
.../vit_base_patch16_224_pretrain_imagenet.py | 143 -
.../vit_base_patch16_384_finetune_imagenet.py | 21 -
.../vit_base_patch32_384_finetune_imagenet.py | 21 -
...vit_large_patch16_224_finetune_imagenet.py | 10 -
...vit_large_patch16_384_finetune_imagenet.py | 21 -
...vit_large_patch32_384_finetune_imagenet.py | 21 -
.../demo/image_demo.py | 24 -
.../docker/serve/Dockerfile | 47 -
.../docs/changelog.md | 236 --
.../docs/conf.py | 83 -
.../docs/getting_started.md | 225 --
.../docs/imgs/qq_group_qrcode.jpg | Bin 204806 -> 0 bytes
.../docs/index.rst | 52 -
.../docs/install.md | 87 -
.../docs/model_zoo.md | 54 -
.../docs/stat.py | 65 -
.../docs/switch_language.md | 3 -
.../tutorials/MMClassification_Tutorial.ipynb | 2353 -----------------
.../docs/tutorials/data_pipeline.md | 144 -
.../docs/tutorials/finetune.md | 94 -
.../docs/tutorials/model_serving.md | 55 -
.../docs/tutorials/new_dataset.md | 141 -
.../docs/tutorials/new_modules.md | 272 --
.../docs/tutorials/onnx2tensorrt.md | 80 -
.../docs/tutorials/pytorch2onnx.md | 204 --
.../docs/tutorials/pytorch2torchscript.md | 56 -
.../docs_zh-CN/conf.py | 85 -
.../docs_zh-CN/getting_started.md | 222 --
.../docs_zh-CN/index.rst | 52 -
.../docs_zh-CN/install.md | 84 -
.../docs_zh-CN/stat.py | 65 -
.../docs_zh-CN/switch_language.md | 3 -
.../tutorials/MMClassification_Tutorial.ipynb | 2353 -----------------
.../docs_zh-CN/tutorials/data_pipeline.md | 144 -
.../docs_zh-CN/tutorials/finetune.md | 92 -
.../docs_zh-CN/tutorials/new_dataset.md | 140 -
.../docs_zh-CN/tutorials/new_modules.md | 281 --
.../docs_zh-CN/tutorials/onnx2tensorrt.md | 76 -
.../docs_zh-CN/tutorials/pytorch2onnx.md | 89 -
.../image/train/1659061854685.png | Bin 13691 -> 0 bytes
.../image/train/1659062180839.png | Bin 71083 -> 0 bytes
.../image/train/1659064222206.png | Bin 65539 -> 0 bytes
.../image/train/1659064427635.png | Bin 13691 -> 0 bytes
.../image/train/1659064905610.png | Bin 13691 -> 0 bytes
.../image/train/1659064925468.png | Bin 56351 -> 0 bytes
.../image/train/1659065333529.png | Bin 56817 -> 0 bytes
.../image/train/1659065659769.png | Bin 94925 -> 0 bytes
.../image/train/1659065746317.png | Bin 95196 -> 0 bytes
.../image/train/1659066120939.png | Bin 39302 -> 0 bytes
.../image/train/1659067079718.png | Bin 60015 -> 0 bytes
.../images/1657694041240.png | Bin 13691 -> 0 bytes
.../images/1657694072163.png | Bin 65539 -> 0 bytes
.../images/2022-07-13-14-06-28.png | Bin 13691 -> 0 bytes
.../images/2022-07-13-14-07-43.png | Bin 71083 -> 0 bytes
.../mmcls.egg-info/PKG-INFO | 116 -
.../mmcls.egg-info/SOURCES.txt | 103 -
.../mmcls.egg-info/dependency_links.txt | 1 -
.../mmcls.egg-info/not-zip-safe | 1 -
.../mmcls.egg-info/requires.txt | 2 -
.../mmcls.egg-info/top_level.txt | 1 -
.../mmcls/__init__.py | 28 -
.../mmcls/__pycache__/__init__.cpython-36.pyc | Bin 800 -> 0 bytes
.../mmcls/__pycache__/version.cpython-36.pyc | Bin 823 -> 0 bytes
.../mmcls/apis/__init__.py | 8 -
.../apis/__pycache__/__init__.cpython-36.pyc | Bin 460 -> 0 bytes
.../apis/__pycache__/inference.cpython-36.pyc | Bin 3493 -> 0 bytes
.../apis/__pycache__/test.cpython-36.pyc | Bin 5269 -> 0 bytes
.../apis/__pycache__/train.cpython-36.pyc | Bin 3834 -> 0 bytes
.../mmcls/apis/test.py | 197 --
.../mmcls/apis/train.py | 159 --
.../mmcls/core/__init__.py | 3 -
.../core/__pycache__/__init__.cpython-36.pyc | Bin 219 -> 0 bytes
.../mmcls/core/evaluation/__init__.py | 11 -
.../__pycache__/__init__.cpython-36.pyc | Bin 629 -> 0 bytes
.../__pycache__/eval_hooks.cpython-36.pyc | Bin 3843 -> 0 bytes
.../__pycache__/eval_metrics.cpython-36.pyc | Bin 9312 -> 0 bytes
.../__pycache__/mean_ap.cpython-36.pyc | Bin 2334 -> 0 bytes
.../multilabel_eval_metrics.cpython-36.pyc | Bin 2418 -> 0 bytes
.../mmcls/core/evaluation/eval_hooks.py | 106 -
.../mmcls/core/evaluation/eval_metrics.py | 235 --
.../mmcls/core/export/__init__.py | 3 -
.../mmcls/core/export/test.py | 95 -
.../mmcls/core/fp16/__init__.py | 4 -
.../fp16/__pycache__/__init__.cpython-36.pyc | Bin 345 -> 0 bytes
.../__pycache__/decorators.cpython-36.pyc | Bin 4269 -> 0 bytes
.../fp16/__pycache__/hooks.cpython-36.pyc | Bin 4034 -> 0 bytes
.../fp16/__pycache__/utils.cpython-36.pyc | Bin 942 -> 0 bytes
.../mmcls/core/fp16/decorators.py | 160 --
.../mmcls/core/fp16/hooks.py | 128 -
.../mmcls/core/fp16/utils.py | 23 -
.../mmcls/core/utils/__init__.py | 4 -
.../utils/__pycache__/__init__.cpython-36.pyc | Bin 319 -> 0 bytes
.../__pycache__/dist_utils.cpython-36.pyc | Bin 2081 -> 0 bytes
.../utils/__pycache__/misc.cpython-36.pyc | Bin 418 -> 0 bytes
.../mmcls/core/utils/dist_utils.py | 56 -
.../mmcls/datasets/__init__.py | 18 -
.../__pycache__/__init__.cpython-36.pyc | Bin 915 -> 0 bytes
.../__pycache__/base_dataset.cpython-36.pyc | Bin 6745 -> 0 bytes
.../__pycache__/builder.cpython-36.pyc | Bin 3150 -> 0 bytes
.../datasets/__pycache__/cifar.cpython-36.pyc | Bin 4139 -> 0 bytes
.../dataset_wrappers.cpython-36.pyc | Bin 6115 -> 0 bytes
.../datasets/__pycache__/dummy.cpython-36.pyc | Bin 1602 -> 0 bytes
.../__pycache__/imagenet.cpython-36.pyc | Bin 31874 -> 0 bytes
.../datasets/__pycache__/mnist.cpython-36.pyc | Bin 5880 -> 0 bytes
.../__pycache__/multi_label.cpython-36.pyc | Bin 2648 -> 0 bytes
.../datasets/__pycache__/utils.cpython-36.pyc | Bin 4474 -> 0 bytes
.../datasets/__pycache__/voc.cpython-36.pyc | Bin 2156 -> 0 bytes
.../mmcls/datasets/builder.py | 108 -
.../mmcls/datasets/dataset_wrappers.py | 162 --
.../mmcls/datasets/dummy.py | 45 -
.../mmcls/datasets/pipelines/__init__.py | 21 -
.../__pycache__/__init__.cpython-36.pyc | Bin 1265 -> 0 bytes
.../__pycache__/auto_augment.cpython-36.pyc | Bin 33038 -> 0 bytes
.../__pycache__/compose.cpython-36.pyc | Bin 1488 -> 0 bytes
.../__pycache__/formating.cpython-36.pyc | Bin 6625 -> 0 bytes
.../__pycache__/loading.cpython-36.pyc | Bin 2504 -> 0 bytes
.../__pycache__/transforms.cpython-36.pyc | Bin 34966 -> 0 bytes
.../mmcls/datasets/pipelines/formating.py | 178 --
.../mmcls/datasets/samplers/__init__.py | 3 -
.../__pycache__/__init__.cpython-36.pyc | Bin 256 -> 0 bytes
.../distributed_sampler.cpython-36.pyc | Bin 1273 -> 0 bytes
.../datasets/samplers/distributed_sampler.py | 42 -
.../mmcls/datasets/utils.py | 152 --
.../mmcls/datasets/voc.py | 68 -
.../mmcls/models/__init__.py | 13 -
.../__pycache__/__init__.cpython-36.pyc | Bin 576 -> 0 bytes
.../models/__pycache__/builder.cpython-36.pyc | Bin 1007 -> 0 bytes
.../mmcls/models/backbones/__init__.py | 21 -
.../__pycache__/__init__.cpython-36.pyc | Bin 969 -> 0 bytes
.../__pycache__/alexnet.cpython-36.pyc | Bin 1782 -> 0 bytes
.../__pycache__/base_backbone.cpython-36.pyc | Bin 1465 -> 0 bytes
.../__pycache__/lenet.cpython-36.pyc | Bin 1514 -> 0 bytes
.../__pycache__/mobilenet_v2.cpython-36.pyc | Bin 7889 -> 0 bytes
.../__pycache__/mobilenet_v3.cpython-36.pyc | Bin 4921 -> 0 bytes
.../__pycache__/regnet.cpython-36.pyc | Bin 10254 -> 0 bytes
.../__pycache__/resnest.cpython-36.pyc | Bin 10683 -> 0 bytes
.../__pycache__/resnet.cpython-36.pyc | Bin 17088 -> 0 bytes
.../__pycache__/resnet_cifar.cpython-36.pyc | Bin 4078 -> 0 bytes
.../__pycache__/resnext.cpython-36.pyc | Bin 5664 -> 0 bytes
.../__pycache__/seresnet.cpython-36.pyc | Bin 5064 -> 0 bytes
.../__pycache__/seresnext.cpython-36.pyc | Bin 5937 -> 0 bytes
.../__pycache__/shufflenet_v1.cpython-36.pyc | Bin 9313 -> 0 bytes
.../__pycache__/shufflenet_v2.cpython-36.pyc | Bin 7768 -> 0 bytes
.../backbones/__pycache__/vgg.cpython-36.pyc | Bin 5267 -> 0 bytes
.../vision_transformer.cpython-36.pyc | Bin 14366 -> 0 bytes
.../mmcls/models/backbones/mobilenet_v3.py | 173 --
.../models/backbones/vision_transformer.py | 480 ----
.../mmcls/models/builder.py | 34 -
.../mmcls/models/classifiers/__init__.py | 4 -
.../__pycache__/__init__.cpython-36.pyc | Bin 290 -> 0 bytes
.../__pycache__/base.cpython-36.pyc | Bin 8092 -> 0 bytes
.../__pycache__/image.cpython-36.pyc | Bin 3083 -> 0 bytes
.../mmcls/models/classifiers/base.py | 224 --
.../mmcls/models/classifiers/image.py | 97 -
.../mmcls/models/heads/__init__.py | 10 -
.../heads/__pycache__/__init__.cpython-36.pyc | Bin 509 -> 0 bytes
.../__pycache__/base_head.cpython-36.pyc | Bin 818 -> 0 bytes
.../heads/__pycache__/cls_head.cpython-36.pyc | Bin 2623 -> 0 bytes
.../__pycache__/linear_head.cpython-36.pyc | Bin 2095 -> 0 bytes
.../multi_label_head.cpython-36.pyc | Bin 1946 -> 0 bytes
.../multi_label_linear_head.cpython-36.pyc | Bin 2291 -> 0 bytes
.../vision_transformer_head.cpython-36.pyc | Bin 2958 -> 0 bytes
.../mmcls/models/heads/cls_head.py | 70 -
.../mmcls/models/heads/linear_head.py | 61 -
.../mmcls/models/heads/multi_label_head.py | 55 -
.../models/heads/multi_label_linear_head.py | 64 -
.../models/heads/vision_transformer_head.py | 81 -
.../mmcls/models/losses/__init__.py | 15 -
.../__pycache__/__init__.cpython-36.pyc | Bin 767 -> 0 bytes
.../__pycache__/accuracy.cpython-36.pyc | Bin 4296 -> 0 bytes
.../asymmetric_loss.cpython-36.pyc | Bin 3497 -> 0 bytes
.../cross_entropy_loss.cpython-36.pyc | Bin 4627 -> 0 bytes
.../__pycache__/focal_loss.cpython-36.pyc | Bin 3827 -> 0 bytes
.../label_smooth_loss.cpython-36.pyc | Bin 5024 -> 0 bytes
.../losses/__pycache__/utils.cpython-36.pyc | Bin 3436 -> 0 bytes
.../mmcls/models/losses/accuracy.py | 134 -
.../mmcls/models/losses/asymmetric_loss.py | 111 -
.../mmcls/models/losses/cross_entropy_loss.py | 157 --
.../mmcls/models/losses/utils.py | 120 -
.../mmcls/models/necks/__init__.py | 3 -
.../necks/__pycache__/__init__.cpython-36.pyc | Bin 237 -> 0 bytes
.../necks/__pycache__/gap.cpython-36.pyc | Bin 2185 -> 0 bytes
.../mmcls/models/utils/__init__.py | 11 -
.../utils/__pycache__/__init__.cpython-36.pyc | Bin 570 -> 0 bytes
.../channel_shuffle.cpython-36.pyc | Bin 964 -> 0 bytes
.../utils/__pycache__/helpers.cpython-36.pyc | Bin 661 -> 0 bytes
.../inverted_residual.cpython-36.pyc | Bin 3156 -> 0 bytes
.../__pycache__/make_divisible.cpython-36.pyc | Bin 1049 -> 0 bytes
.../utils/__pycache__/se_layer.cpython-36.pyc | Bin 2103 -> 0 bytes
.../mmcls/models/utils/augment/__init__.py | 6 -
.../__pycache__/__init__.cpython-36.pyc | Bin 392 -> 0 bytes
.../__pycache__/augments.cpython-36.pyc | Bin 2941 -> 0 bytes
.../__pycache__/builder.cpython-36.pyc | Bin 410 -> 0 bytes
.../augment/__pycache__/cutmix.cpython-36.pyc | Bin 5541 -> 0 bytes
.../__pycache__/identity.cpython-36.pyc | Bin 1357 -> 0 bytes
.../augment/__pycache__/mixup.cpython-36.pyc | Bin 2317 -> 0 bytes
.../mmcls/models/utils/augment/builder.py | 7 -
.../mmcls/models/utils/augment/cutmix.py | 139 -
.../mmcls/models/utils/augment/mixup.py | 56 -
.../mmcls/models/utils/helpers.py | 20 -
.../mmcls/models/utils/inverted_residual.py | 119 -
.../mmcls/models/utils/se_layer.py | 56 -
.../mmcls/utils/__init__.py | 4 -
.../utils/__pycache__/__init__.cpython-36.pyc | Bin 269 -> 0 bytes
.../__pycache__/collect_env.cpython-36.pyc | Bin 623 -> 0 bytes
.../utils/__pycache__/logger.cpython-36.pyc | Bin 377 -> 0 bytes
.../mmcls/utils/logger.py | 7 -
.../model_zoo.yml | 10 -
.../multi_test.sh | 7 -
.../requirements/docs.txt | 4 -
.../requirements/mminstall.txt | 1 -
.../requirements/optional.txt | 1 -
.../requirements/readthedocs.txt | 3 -
.../requirements/runtime.txt | 2 -
.../setup.cfg | 19 -
.../mmclassification-speed-benchmark/setup.py | 118 -
.../tests/test_backbones/test_mobilenet_v3.py | 168 --
.../tests/test_backbones/test_utils.py | 164 --
.../test_backbones/test_vision_transformer.py | 57 -
.../tests/test_classifiers.py | 221 --
.../tests/test_dataset.py | 349 ---
.../tests/test_heads.py | 50 -
.../tests/test_losses.py | 207 --
.../tests/test_metrics.py | 56 -
.../tests/test_neck.py | 38 -
.../tools/benchmark_regression.py | 166 --
.../tools/deployment/test.py | 115 -
.../tools/dist_test.sh | 10 -
.../tools/dist_train.sh | 9 -
.../tools/publish_model.py | 39 -
.../tools/test.py | 176 --
.../tools/train.py | 156 --
.../mmclassification-speed-benchmark/train.md | 258 --
1250 files changed, 64007 insertions(+), 18154 deletions(-)
create mode 100644 openmmlab_test/mmclassification-0.24.1/.gitattributes
create mode 100644 openmmlab_test/mmclassification-0.24.1/.gitignore
create mode 100644 openmmlab_test/mmclassification-0.24.1/.pre-commit-config.yaml
create mode 100644 openmmlab_test/mmclassification-0.24.1/.readthedocs.yml
create mode 100644 openmmlab_test/mmclassification-0.24.1/CITATION.cff
create mode 100644 openmmlab_test/mmclassification-0.24.1/CONTRIBUTING.md
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/LICENSE (99%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/MANIFEST.in
create mode 100644 openmmlab_test/mmclassification-0.24.1/README.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/README_zh-CN.md
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/datasets/cifar100_bs16.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/datasets/cifar10_bs16.py (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/cub_bs8_384.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/cub_bs8_448.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet21k_bs128.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs128_poolformer_medium_224.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs128_poolformer_small_224.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs256_rsb_a12.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs256_rsb_a3.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs32.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs32_pil_bicubic.py
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/datasets/imagenet_bs32_pil_resize.py (91%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/datasets/imagenet_bs64.py (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs64_autoaug.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs64_convmixer_224.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs64_mixer_224.py
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/datasets/imagenet_bs64_pil_resize.py (92%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs64_pil_resize_autoaug.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs64_swin_224.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs64_swin_256.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs64_swin_384.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs64_t2t_224.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/pipelines/auto_aug.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/pipelines/rand_aug.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/stanford_cars_bs8_448.py
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/datasets/voc_bs16.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/default_runtime.py (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/conformer/base-p16.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/conformer/small-p16.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/conformer/small-p32.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/conformer/tiny-p16.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/convmixer/convmixer-1024-20.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/convmixer/convmixer-1536-20.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/convmixer/convmixer-768-32.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/convnext/convnext-base.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/convnext/convnext-large.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/convnext/convnext-small.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/convnext/convnext-tiny.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/convnext/convnext-xlarge.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/densenet/densenet121.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/densenet/densenet161.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/densenet/densenet169.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/densenet/densenet201.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/efficientnet_b0.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/efficientnet_b1.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/efficientnet_b2.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/efficientnet_b3.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/efficientnet_b4.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/efficientnet_b5.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/efficientnet_b6.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/efficientnet_b7.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/efficientnet_b8.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/efficientnet_em.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/efficientnet_es.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hornet/hornet-base-gf.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hornet/hornet-base.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hornet/hornet-large-gf.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hornet/hornet-large-gf384.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hornet/hornet-large.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hornet/hornet-small-gf.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hornet/hornet-small.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hornet/hornet-tiny-gf.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hornet/hornet-tiny.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hrnet/hrnet-w18.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hrnet/hrnet-w30.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hrnet/hrnet-w32.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hrnet/hrnet-w40.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hrnet/hrnet-w44.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hrnet/hrnet-w48.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hrnet/hrnet-w64.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/mlp_mixer_base_patch16.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/mlp_mixer_large_patch16.py
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/mobilenet_v2_1x.py (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/mobilenet_v3_large_imagenet.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/mobilenet_v3_small_cifar.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/mobilenet_v3_small_imagenet.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/mvit/mvitv2-base.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/mvit/mvitv2-large.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/mvit/mvitv2-small.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/mvit/mvitv2-tiny.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/poolformer/poolformer_m36.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/poolformer/poolformer_m48.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/poolformer/poolformer_s12.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/poolformer/poolformer_s24.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/poolformer/poolformer_s36.py
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/regnet/regnetx_1.6gf.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/regnet/regnetx_12gf.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/regnet/regnetx_3.2gf.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/regnet/regnetx_4.0gf.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/regnet/regnetx_400mf.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/regnet/regnetx_6.4gf.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/regnet/regnetx_8.0gf.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/regnet/regnetx_800mf.py (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/repmlp-base_224.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/repvgg-A0_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/repvgg-B3_lbs-mixup_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/res2net101-w26-s4.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/res2net50-w14-s8.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/res2net50-w26-s4.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/res2net50-w26-s6.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/res2net50-w26-s8.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/res2net50-w48-s2.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnest101.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnest200.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnest269.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnest50.py
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/resnet101.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/resnet101_cifar.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/resnet152.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/resnet152_cifar.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/resnet18.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/resnet18_cifar.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/resnet34.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/resnet34_cifar.py (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnet34_gem.py
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/resnet50.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/resnet50_cifar.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/resnet50_cifar_cutmix.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/resnet50_cifar_mixup.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/resnet50_cutmix.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/resnet50_label_smooth.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/resnet50_mixup.py (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnetv1c50.py
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/resnetv1d101.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/resnetv1d152.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/resnetv1d50.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/resnext101_32x4d.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/resnext101_32x8d.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/resnext152_32x4d.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/resnext50_32x4d.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/seresnet101.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/seresnet50.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/seresnext101_32x4d.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/seresnext50_32x4d.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/shufflenet_v1_1x.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/shufflenet_v2_1x.py (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer/base_224.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer/base_384.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer/large_224.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer/large_384.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer/small_224.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer/tiny_224.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer_v2/base_256.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer_v2/base_384.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer_v2/large_256.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer_v2/large_384.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer_v2/small_256.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer_v2/tiny_256.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/t2t-vit-t-14.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/t2t-vit-t-19.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/t2t-vit-t-24.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/tnt_s_patch16_224.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/twins_pcpvt_base.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/twins_svt_base.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/van/van_b0.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/van/van_b1.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/van/van_b2.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/van/van_b3.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/van/van_b4.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/van/van_b5.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/van/van_b6.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/van/van_base.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/van/van_large.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/van/van_small.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/van/van_tiny.py
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/vgg11.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/vgg11bn.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/vgg13.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/vgg13bn.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/vgg16.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/vgg16bn.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/vgg19.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/models/vgg19bn.py (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/vit-base-p16.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/vit-base-p32.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/vit-large-p16.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/vit-large-p32.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/models/wide-resnet50.py
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/schedules/cifar10_bs128.py (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/cub_bs64.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/imagenet_bs1024_adamw_conformer.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/imagenet_bs1024_adamw_swin.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/imagenet_bs1024_coslr.py
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/schedules/imagenet_bs1024_linearlr_bn_nowd.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/schedules/imagenet_bs2048.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/schedules/imagenet_bs2048_AdamW.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/schedules/imagenet_bs2048_coslr.py (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/imagenet_bs2048_rsb.py
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/schedules/imagenet_bs256.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/schedules/imagenet_bs256_140e.py (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/imagenet_bs256_200e_coslr_warmup.py
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/schedules/imagenet_bs256_coslr.py (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/_base_/schedules/imagenet_bs256_epochstep.py (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/imagenet_bs4096_AdamW.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/stanford_cars_bs8.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/conformer/README.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/conformer/conformer-base-p16_8xb128_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/conformer/conformer-small-p16_8xb128_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/conformer/conformer-small-p32_8xb128_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/conformer/conformer-tiny-p16_8xb128_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/conformer/metafile.yml
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/convmixer/README.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/convmixer/convmixer-1024-20_10xb64_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/convmixer/convmixer-1536-20_10xb64_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/convmixer/convmixer-768-32_10xb64_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/convmixer/metafile.yml
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/convnext/README.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/convnext/convnext-base_32xb128_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/convnext/convnext-large_64xb64_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/convnext/convnext-small_32xb128_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/convnext/convnext-tiny_32xb128_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/convnext/convnext-xlarge_64xb64_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/convnext/metafile.yml
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/cspnet/README.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/cspnet/cspdarknet50_8xb32_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/cspnet/cspresnet50_8xb32_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/cspnet/cspresnext50_8xb32_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/cspnet/metafile.yml
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/csra/README.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/csra/metafile.yml
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/csra/resnet101-csra_1xb16_voc07-448px.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/deit/README.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/deit/deit-base-distilled_ft-16xb32_in1k-384px.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/deit/deit-base-distilled_pt-16xb64_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/deit/deit-base_ft-16xb32_in1k-384px.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/deit/deit-base_pt-16xb64_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/deit/deit-small-distilled_pt-4xb256_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/deit/deit-small_pt-4xb256_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/deit/deit-tiny-distilled_pt-4xb256_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/deit/deit-tiny_pt-4xb256_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/deit/metafile.yml
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/densenet/README.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/densenet/densenet121_4xb256_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/densenet/densenet161_4xb256_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/densenet/densenet169_4xb256_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/densenet/densenet201_4xb256_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/densenet/metafile.yml
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/efficientformer/README.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/efficientformer/efficientformer-l1_8xb128_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/efficientformer/efficientformer-l3_8xb128_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/efficientformer/efficientformer-l7_8xb128_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/efficientformer/metafile.yml
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/efficientnet/README.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b0_8xb32-01norm_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b0_8xb32_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b1_8xb32-01norm_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b1_8xb32_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b2_8xb32-01norm_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b2_8xb32_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b3_8xb32-01norm_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b3_8xb32_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b4_8xb32-01norm_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b4_8xb32_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b5_8xb32-01norm_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b5_8xb32_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b6_8xb32-01norm_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b6_8xb32_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b7_8xb32-01norm_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b7_8xb32_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b8_8xb32-01norm_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b8_8xb32_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-em_8xb32-01norm_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-es_8xb32-01norm_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/efficientnet/metafile.yml
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/fp16/resnet50_b32x8_fp16_dynamic_imagenet.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/fp16/resnet50_b32x8_fp16_imagenet.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/hornet/README.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/hornet/hornet-base-gf_8xb64_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/hornet/hornet-base_8xb64_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/hornet/hornet-small-gf_8xb64_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/hornet/hornet-small_8xb64_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/hornet/hornet-tiny-gf_8xb128_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/hornet/hornet-tiny_8xb128_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/hornet/metafile.yml
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/hrnet/README.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/hrnet/hrnet-w18_4xb32_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/hrnet/hrnet-w30_4xb32_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/hrnet/hrnet-w32_4xb32_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/hrnet/hrnet-w40_4xb32_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/hrnet/hrnet-w44_4xb32_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/hrnet/hrnet-w48_4xb32_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/hrnet/hrnet-w64_4xb32_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/hrnet/metafile.yml
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/lenet/README.md
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/configs/lenet/lenet5_mnist.py (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/mlp_mixer/README.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/mlp_mixer/metafile.yml
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/mlp_mixer/mlp-mixer-base-p16_64xb64_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/mlp_mixer/mlp-mixer-large-p16_64xb64_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v2/README.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v2/metafile.yml
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v2/mobilenet-v2_8xb32_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v2/mobilenet_v2_b32x8_imagenet.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v3/README.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v3/metafile.yml
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v3/mobilenet-v3-large_8xb32_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v3/mobilenet-v3-small_8xb16_cifar10.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v3/mobilenet-v3-small_8xb32_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v3/mobilenet_v3_large_imagenet.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v3/mobilenet_v3_small_cifar.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v3/mobilenet_v3_small_imagenet.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/mvit/README.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/mvit/metafile.yml
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/mvit/mvitv2-base_8xb256_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/mvit/mvitv2-large_8xb256_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/mvit/mvitv2-small_8xb256_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/mvit/mvitv2-tiny_8xb256_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/poolformer/README.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/poolformer/metafile.yml
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/poolformer/poolformer-m36_32xb128_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/poolformer/poolformer-m48_32xb128_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/poolformer/poolformer-s12_32xb128_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/poolformer/poolformer-s24_32xb128_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/poolformer/poolformer-s36_32xb128_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/regnet/README.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/regnet/metafile.yml
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/regnet/regnetx-1.6gf_8xb128_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/regnet/regnetx-12gf_8xb64_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/regnet/regnetx-3.2gf_8xb64_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/regnet/regnetx-4.0gf_8xb64_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/regnet/regnetx-400mf_8xb128_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/regnet/regnetx-6.4gf_8xb64_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/regnet/regnetx-8.0gf_8xb64_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/regnet/regnetx-800mf_8xb128_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/repmlp/README.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/repmlp/metafile.yml
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/repmlp/repmlp-base_8xb64_in1k-256px.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/repmlp/repmlp-base_8xb64_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/repmlp/repmlp-base_delopy_8xb64_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/repmlp/repmlp-base_deploy_8xb64_in1k-256px.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/repvgg/README.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-A0_deploy_4xb64-coslr-120e_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-A1_deploy_4xb64-coslr-120e_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-A2_deploy_4xb64-coslr-120e_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-B0_deploy_4xb64-coslr-120e_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-B1_deploy_4xb64-coslr-120e_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-B1g2_deploy_4xb64-coslr-120e_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-B1g4_deploy_4xb64-coslr-120e_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-B2_deploy_4xb64-coslr-120e_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-B2g4_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-B3_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-B3g4_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-D2se_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/repvgg/metafile.yml
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-A0_4xb64-coslr-120e_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-A1_4xb64-coslr-120e_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-A2_4xb64-coslr-120e_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-B0_4xb64-coslr-120e_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-B1_4xb64-coslr-120e_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-B1g2_4xb64-coslr-120e_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-B1g4_4xb64-coslr-120e_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-B2_4xb64-coslr-120e_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-B2g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-B3_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-B3g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-D2se_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/res2net/README.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/res2net/metafile.yml
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/res2net/res2net101-w26-s4_8xb32_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/res2net/res2net50-w14-s8_8xb32_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/res2net/res2net50-w26-s8_8xb32_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnest/README.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnest/resnest101_32xb64_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnest/resnest101_b64x32_imagenet.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnest/resnest200_64xb32_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnest/resnest200_b32x64_imagenet.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnest/resnest269_64xb32_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnest/resnest269_b32x64_imagenet.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnest/resnest50_32xb64_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnest/resnest50_b64x32_imagenet.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnet/README.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnet/metafile.yml
rename openmmlab_test/{mmclassification-speed-benchmark/configs/resnet/resnet101_b16x8_cifar10.py => mmclassification-0.24.1/configs/resnet/resnet101_8xb16_cifar10.py} (100%)
rename openmmlab_test/{mmclassification-speed-benchmark/configs/resnet/resnet101_b32x8_imagenet.py => mmclassification-0.24.1/configs/resnet/resnet101_8xb32_in1k.py} (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet101_b16x8_cifar10.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet101_b32x8_imagenet.py
rename openmmlab_test/{mmclassification-speed-benchmark/configs/resnet/resnet152_b16x8_cifar10.py => mmclassification-0.24.1/configs/resnet/resnet152_8xb16_cifar10.py} (100%)
rename openmmlab_test/{mmclassification-speed-benchmark/configs/resnet/resnet152_b32x8_imagenet.py => mmclassification-0.24.1/configs/resnet/resnet152_8xb32_in1k.py} (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet152_b16x8_cifar10.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet152_b32x8_imagenet.py
rename openmmlab_test/{mmclassification-speed-benchmark/configs/resnet/resnet18_b16x8_cifar10.py => mmclassification-0.24.1/configs/resnet/resnet18_8xb16_cifar10.py} (100%)
rename openmmlab_test/{mmclassification-speed-benchmark/configs/resnet/resnet18_b32x8_imagenet.py => mmclassification-0.24.1/configs/resnet/resnet18_8xb32_in1k.py} (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet18_b16x8_cifar10.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet18_b32x8_imagenet.py
rename openmmlab_test/{mmclassification-speed-benchmark/configs/resnet/resnet34_b16x8_cifar10.py => mmclassification-0.24.1/configs/resnet/resnet34_8xb16_cifar10.py} (100%)
rename openmmlab_test/{mmclassification-speed-benchmark/configs/resnet/resnet34_b32x8_imagenet.py => mmclassification-0.24.1/configs/resnet/resnet34_8xb32_in1k.py} (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet34_b16x8_cifar10.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet34_b32x8_imagenet.py
rename openmmlab_test/{mmclassification-speed-benchmark/configs/resnet/resnet50_b64x32_warmup_coslr_imagenet.py => mmclassification-0.24.1/configs/resnet/resnet50_32xb64-warmup-coslr_in1k.py} (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_32xb64-warmup-lbs_in1k.py
rename openmmlab_test/{mmclassification-speed-benchmark/configs/resnet/resnet50_b64x32_warmup_imagenet.py => mmclassification-0.24.1/configs/resnet/resnet50_32xb64-warmup_in1k.py} (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb128_coslr-90e_in21k.py
rename openmmlab_test/{mmclassification-speed-benchmark/configs/resnet/resnet50_b16x8_cifar10_mixup.py => mmclassification-0.24.1/configs/resnet/resnet50_8xb16-mixup_cifar10.py} (100%)
rename openmmlab_test/{mmclassification-speed-benchmark/configs/resnet/resnet50_b16x8_cifar10.py => mmclassification-0.24.1/configs/resnet/resnet50_8xb16_cifar10.py} (100%)
rename openmmlab_test/{mmclassification-speed-benchmark/configs/resnet/resnet50_b16x8_cifar100.py => mmclassification-0.24.1/configs/resnet/resnet50_8xb16_cifar100.py} (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb256-rsb-a1-600e_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb256-rsb-a2-300e_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb256-rsb-a3-100e_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb32-coslr-preciseBN_in1k.py
rename openmmlab_test/{mmclassification-speed-benchmark/configs/resnet/resnet50_b32x8_coslr_imagenet.py => mmclassification-0.24.1/configs/resnet/resnet50_8xb32-coslr_in1k.py} (100%)
rename openmmlab_test/{mmclassification-speed-benchmark/configs/resnet/resnet50_b32x8_cutmix_imagenet.py => mmclassification-0.24.1/configs/resnet/resnet50_8xb32-cutmix_in1k.py} (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb32-fp16-dynamic_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb32-fp16_in1k.py
rename openmmlab_test/{mmclassification-speed-benchmark/configs/resnet/resnet50_b32x8_label_smooth_imagenet.py => mmclassification-0.24.1/configs/resnet/resnet50_8xb32-lbs_in1k.py} (100%)
rename openmmlab_test/{mmclassification-speed-benchmark/configs/resnet/resnet50_b32x8_mixup_imagenet.py => mmclassification-0.24.1/configs/resnet/resnet50_8xb32-mixup_in1k.py} (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb32_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb8_cars.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb8_cub.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_b16x8_cifar10.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_b16x8_cifar100.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_b16x8_cifar10_mixup.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_b32x8_coslr_imagenet.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_b32x8_cutmix_imagenet.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_b32x8_imagenet.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_b32x8_label_smooth_imagenet.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_b32x8_mixup_imagenet.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_b64x32_warmup_coslr_imagenet.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_b64x32_warmup_imagenet.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_b64x32_warmup_label_smooth_imagenet.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnet/resnetv1c101_8xb32_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnet/resnetv1c152_8xb32_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnet/resnetv1c50_8xb32_in1k.py
rename openmmlab_test/{mmclassification-speed-benchmark/configs/resnet/resnetv1d101_b32x8_imagenet.py => mmclassification-0.24.1/configs/resnet/resnetv1d101_8xb32_in1k.py} (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnet/resnetv1d101_b32x8_imagenet.py
rename openmmlab_test/{mmclassification-speed-benchmark/configs/resnet/resnetv1d152_b32x8_imagenet.py => mmclassification-0.24.1/configs/resnet/resnetv1d152_8xb32_in1k.py} (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnet/resnetv1d152_b32x8_imagenet.py
rename openmmlab_test/{mmclassification-speed-benchmark/configs/resnet/resnetv1d50_b32x8_imagenet.py => mmclassification-0.24.1/configs/resnet/resnetv1d50_8xb32_in1k.py} (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnet/resnetv1d50_b32x8_imagenet.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnext/README.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnext/metafile.yml
rename openmmlab_test/{mmclassification-speed-benchmark/configs/resnext/resnext101_32x4d_b32x8_imagenet.py => mmclassification-0.24.1/configs/resnext/resnext101-32x4d_8xb32_in1k.py} (100%)
rename openmmlab_test/{mmclassification-speed-benchmark/configs/resnext/resnext101_32x8d_b32x8_imagenet.py => mmclassification-0.24.1/configs/resnext/resnext101-32x8d_8xb32_in1k.py} (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnext/resnext101_32x4d_b32x8_imagenet.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnext/resnext101_32x8d_b32x8_imagenet.py
rename openmmlab_test/{mmclassification-speed-benchmark/configs/resnext/resnext152_32x4d_b32x8_imagenet.py => mmclassification-0.24.1/configs/resnext/resnext152-32x4d_8xb32_in1k.py} (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnext/resnext152_32x4d_b32x8_imagenet.py
rename openmmlab_test/{mmclassification-speed-benchmark/configs/resnext/resnext50_32x4d_b32x8_imagenet.py => mmclassification-0.24.1/configs/resnext/resnext50-32x4d_8xb32_in1k.py} (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/resnext/resnext50_32x4d_b32x8_imagenet.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/seresnet/README.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/seresnet/metafile.yml
rename openmmlab_test/{mmclassification-speed-benchmark/configs/seresnet/seresnet101_b32x8_imagenet.py => mmclassification-0.24.1/configs/seresnet/seresnet101_8xb32_in1k.py} (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/seresnet/seresnet101_b32x8_imagenet.py
rename openmmlab_test/{mmclassification-speed-benchmark/configs/seresnet/seresnet50_b32x8_imagenet.py => mmclassification-0.24.1/configs/seresnet/seresnet50_8xb32_in1k.py} (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/seresnet/seresnet50_b32x8_imagenet.py
rename openmmlab_test/{mmclassification-speed-benchmark/configs/seresnext/seresnext101_32x4d_b32x8_imagenet.py => mmclassification-0.24.1/configs/seresnet/seresnext101-32x4d_8xb32_in1k.py} (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/seresnet/seresnext101_32x4d_b32x8_imagenet.py
rename openmmlab_test/{mmclassification-speed-benchmark/configs/seresnext/seresnext50_32x4d_b32x8_imagenet.py => mmclassification-0.24.1/configs/seresnet/seresnext50-32x4d_8xb32_in1k.py} (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/seresnet/seresnext50_32x4d_b32x8_imagenet.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/shufflenet_v1/README.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/shufflenet_v1/metafile.yml
rename openmmlab_test/{mmclassification-speed-benchmark/configs/shufflenet_v1/shufflenet_v1_1x_b64x16_linearlr_bn_nowd_imagenet.py => mmclassification-0.24.1/configs/shufflenet_v1/shufflenet-v1-1x_16xb64_in1k.py} (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/shufflenet_v1/shufflenet_v1_1x_b64x16_linearlr_bn_nowd_imagenet.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/shufflenet_v2/README.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/shufflenet_v2/metafile.yml
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/shufflenet_v2/shufflenet-v2-1x_16xb64_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/shufflenet_v2/shufflenet_v2_1x_b64x16_linearlr_bn_nowd_imagenet.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/README.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/metafile.yml
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin-base_16xb64_in1k-384px.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin-base_16xb64_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin-large_16xb64_in1k-384px.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin-large_16xb64_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin-large_8xb8_cub_384px.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin-small_16xb64_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin-tiny_16xb64_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin_base_224_b16x64_300e_imagenet.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin_base_384_evalonly_imagenet.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin_large_224_evalonly_imagenet.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin_large_384_evalonly_imagenet.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin_small_224_b16x64_300e_imagenet.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin_tiny_224_b16x64_300e_imagenet.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/README.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/metafile.yml
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/swinv2-base-w16_16xb64_in1k-256px.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/swinv2-base-w16_in21k-pre_16xb64_in1k-256px.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/swinv2-base-w24_in21k-pre_16xb64_in1k-384px.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/swinv2-base-w8_16xb64_in1k-256px.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/swinv2-large-w16_in21k-pre_16xb64_in1k-256px.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/swinv2-large-w24_in21k-pre_16xb64_in1k-384px.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/swinv2-small-w16_16xb64_in1k-256px.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/swinv2-small-w8_16xb64_in1k-256px.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/swinv2-tiny-w16_16xb64_in1k-256px.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/swinv2-tiny-w8_16xb64_in1k-256px.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/t2t_vit/README.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/t2t_vit/metafile.yml
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/t2t_vit/t2t-vit-t-19_8xb64_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/t2t_vit/t2t-vit-t-24_8xb64_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/tnt/README.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/tnt/metafile.yml
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/tnt/tnt-s-p16_16xb64_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/tnt/tnt_s_patch16_224_evalonly_imagenet.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/twins/README.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/twins/metafile.yml
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/twins/twins-pcpvt-base_8xb128_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/twins/twins-pcpvt-large_16xb64_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/twins/twins-pcpvt-small_8xb128_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/twins/twins-svt-base_8xb128_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/twins/twins-svt-large_16xb64_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/twins/twins-svt-small_8xb128_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/van/README.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/van/metafile.yml
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/van/van-b0_8xb128_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/van/van-b1_8xb128_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/van/van-b2_8xb128_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/van/van-b3_8xb128_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/van/van-b4_8xb128_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/van/van-base_8xb128_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/van/van-large_8xb128_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/van/van-small_8xb128_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/van/van-tiny_8xb128_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/vgg/README.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/vgg/metafile.yml
rename openmmlab_test/{mmclassification-speed-benchmark/configs/vgg/vgg11_b32x8_imagenet.py => mmclassification-0.24.1/configs/vgg/vgg11_8xb32_in1k.py} (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg11_b32x8_imagenet.py
rename openmmlab_test/{mmclassification-speed-benchmark/configs/vgg/vgg11bn_b32x8_imagenet.py => mmclassification-0.24.1/configs/vgg/vgg11bn_8xb32_in1k.py} (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg11bn_b32x8_imagenet.py
rename openmmlab_test/{mmclassification-speed-benchmark/configs/vgg/vgg13_b32x8_imagenet.py => mmclassification-0.24.1/configs/vgg/vgg13_8xb32_in1k.py} (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg13_b32x8_imagenet.py
rename openmmlab_test/{mmclassification-speed-benchmark/configs/vgg/vgg13bn_b32x8_imagenet.py => mmclassification-0.24.1/configs/vgg/vgg13bn_8xb32_in1k.py} (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg13bn_b32x8_imagenet.py
rename openmmlab_test/{mmclassification-speed-benchmark/configs/vgg/vgg16_b16x8_voc.py => mmclassification-0.24.1/configs/vgg/vgg16_8xb16_voc.py} (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg16_8xb32_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg16_b16x8_voc.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg16_b32x8_imagenet.py
rename openmmlab_test/{mmclassification-speed-benchmark/configs/vgg/vgg16bn_b32x8_imagenet.py => mmclassification-0.24.1/configs/vgg/vgg16bn_8xb32_in1k.py} (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg16bn_b32x8_imagenet.py
rename openmmlab_test/{mmclassification-speed-benchmark/configs/vgg/vgg19_b32x8_imagenet.py => mmclassification-0.24.1/configs/vgg/vgg19_8xb32_in1k.py} (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg19_b32x8_imagenet.py
rename openmmlab_test/{mmclassification-speed-benchmark/configs/vgg/vgg19bn_b32x8_imagenet.py => mmclassification-0.24.1/configs/vgg/vgg19bn_8xb32_in1k.py} (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg19bn_b32x8_imagenet.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/vision_transformer/README.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/vision_transformer/metafile.yml
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/vision_transformer/vit-base-p16_ft-4xb544-ipu_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/vision_transformer/vit-base-p16_ft-64xb64_in1k-384.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/vision_transformer/vit-base-p16_pt-64xb64_in1k-224.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/vision_transformer/vit-base-p32_ft-64xb64_in1k-384.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/vision_transformer/vit-base-p32_pt-64xb64_in1k-224.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/vision_transformer/vit-large-p16_ft-64xb64_in1k-384.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/vision_transformer/vit-large-p16_pt-64xb64_in1k-224.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/vision_transformer/vit-large-p32_ft-64xb64_in1k-384.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/vision_transformer/vit-large-p32_pt-64xb64_in1k-224.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/wrn/README.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/wrn/metafile.yml
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/wrn/wide-resnet101_8xb32_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/wrn/wide-resnet50_8xb32_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/configs/wrn/wide-resnet50_timm_8xb32_in1k.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/demo/bird.JPEG
create mode 100644 openmmlab_test/mmclassification-0.24.1/demo/cat-dog.png
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/demo/demo.JPEG (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/demo/dog.jpg
create mode 100644 openmmlab_test/mmclassification-0.24.1/demo/image_demo.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/demo/ipu_train_example.sh
create mode 100644 openmmlab_test/mmclassification-0.24.1/docker/Dockerfile
create mode 100644 openmmlab_test/mmclassification-0.24.1/docker/serve/Dockerfile
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/docker/serve/config.properties (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/docker/serve/entrypoint.sh (100%)
rename openmmlab_test/{mmclassification-speed-benchmark/docs => mmclassification-0.24.1/docs/en}/Makefile (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/en/_static/css/readthedocs.css
rename openmmlab_test/{mmclassification-speed-benchmark/resources => mmclassification-0.24.1/docs/en/_static/image}/mmcls-logo.png (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/en/_static/image/tools/analysis/analyze_log.jpg
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/en/_static/image/tools/visualization/lr_schedule1.png
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/en/_static/image/tools/visualization/lr_schedule2.png
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/en/_static/js/custom.js
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/en/_templates/classtemplate.rst
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/en/api/apis.rst
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/en/api/core.rst
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/en/api/datasets.rst
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/en/api/models.rst
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/en/api/models.utils.augment.rst
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/en/api/models.utils.rst
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/en/api/transforms.rst
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/en/api/utils.rst
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/en/changelog.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/en/compatibility.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/en/conf.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/en/device/npu.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/en/docutils.conf
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/en/faq.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/en/getting_started.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/en/index.rst
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/en/install.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/en/model_zoo.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/en/stat.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/en/tools/analysis.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/en/tools/miscellaneous.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/en/tools/model_serving.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/en/tools/onnx2tensorrt.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/en/tools/pytorch2onnx.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/en/tools/pytorch2torchscript.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/en/tools/visualization.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/en/tutorials/MMClassification_python.ipynb
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/en/tutorials/MMClassification_tools.ipynb
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/en/tutorials/config.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/en/tutorials/data_pipeline.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/en/tutorials/finetune.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/en/tutorials/new_dataset.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/en/tutorials/new_modules.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/en/tutorials/runtime.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/en/tutorials/schedule.md
rename openmmlab_test/{mmclassification-speed-benchmark/docs_zh-CN => mmclassification-0.24.1/docs/zh_CN}/Makefile (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/zh_CN/_static/css/readthedocs.css
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/zh_CN/_static/image/mmcls-logo.png
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/zh_CN/_static/image/tools/analysis/analyze_log.jpg
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/zh_CN/_static/image/tools/visualization/lr_schedule1.png
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/zh_CN/_static/image/tools/visualization/lr_schedule2.png
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/zh_CN/_static/js/custom.js
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/zh_CN/community/CONTRIBUTING.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/zh_CN/compatibility.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/zh_CN/conf.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/zh_CN/device/npu.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/zh_CN/docutils.conf
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/zh_CN/faq.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/zh_CN/getting_started.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/zh_CN/imgs/qq_group_qrcode.jpg
rename openmmlab_test/{mmclassification-speed-benchmark/docs => mmclassification-0.24.1/docs/zh_CN}/imgs/zhihu_qrcode.jpg (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/zh_CN/index.rst
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/zh_CN/install.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/zh_CN/stat.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/zh_CN/tools/analysis.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/zh_CN/tools/miscellaneous.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/zh_CN/tools/model_serving.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/zh_CN/tools/onnx2tensorrt.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/zh_CN/tools/pytorch2onnx.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/zh_CN/tools/pytorch2torchscript.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/zh_CN/tools/visualization.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/zh_CN/tutorials/MMClassification_python_cn.ipynb
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/zh_CN/tutorials/MMClassification_tools_cn.ipynb
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/zh_CN/tutorials/config.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/zh_CN/tutorials/data_pipeline.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/zh_CN/tutorials/finetune.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/zh_CN/tutorials/new_dataset.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/zh_CN/tutorials/new_modules.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/zh_CN/tutorials/runtime.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/docs/zh_CN/tutorials/schedule.md
create mode 100644 openmmlab_test/mmclassification-0.24.1/hostfile
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/__init__.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/apis/__init__.py
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/mmcls/apis/inference.py (82%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/apis/test.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/apis/test_old.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/apis/test_time.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/apis/train.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/core/__init__.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/core/evaluation/__init__.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/core/evaluation/eval_hooks.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/core/evaluation/eval_metrics.py
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/mmcls/core/evaluation/mean_ap.py (92%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/mmcls/core/evaluation/multilabel_eval_metrics.py (98%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/core/export/__init__.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/core/export/test.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/core/hook/__init__.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/core/hook/class_num_check_hook.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/core/hook/lr_updater.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/core/hook/precise_bn_hook.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/core/hook/wandblogger_hook.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/core/optimizers/__init__.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/core/optimizers/lamb.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/core/utils/__init__.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/core/utils/dist_utils.py
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/mmcls/core/utils/misc.py (81%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/core/visualization/__init__.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/core/visualization/image.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/datasets/__init__.py
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/mmcls/datasets/base_dataset.py (81%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/datasets/builder.py
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/mmcls/datasets/cifar.py (76%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/datasets/cub.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/datasets/custom.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/datasets/dataset_wrappers.py
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/mmcls/datasets/imagenet.py (91%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/datasets/imagenet21k.py
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/mmcls/datasets/mnist.py (98%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/mmcls/datasets/multi_label.py (81%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/datasets/pipelines/__init__.py
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/mmcls/datasets/pipelines/auto_augment.py (88%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/mmcls/datasets/pipelines/compose.py (96%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/datasets/pipelines/formatting.py
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/mmcls/datasets/pipelines/loading.py (98%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/mmcls/datasets/pipelines/transforms.py (84%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/datasets/samplers/__init__.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/datasets/samplers/distributed_sampler.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/datasets/samplers/repeat_aug.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/datasets/stanford_cars.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/datasets/utils.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/datasets/voc.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/__init__.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/backbones/__init__.py
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/mmcls/models/backbones/alexnet.py (96%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/mmcls/models/backbones/base_backbone.py (94%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/backbones/conformer.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/backbones/convmixer.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/backbones/convnext.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/backbones/cspnet.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/backbones/deit.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/backbones/densenet.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/backbones/efficientformer.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/backbones/efficientnet.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/backbones/hornet.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/backbones/hrnet.py
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/mmcls/models/backbones/lenet.py (94%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/backbones/mlp_mixer.py
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/mmcls/models/backbones/mobilenet_v2.py (91%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/backbones/mobilenet_v3.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/backbones/mvit.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/backbones/poolformer.py
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/mmcls/models/backbones/regnet.py (85%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/backbones/repmlp.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/backbones/repvgg.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/backbones/res2net.py
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/mmcls/models/backbones/resnest.py (99%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/mmcls/models/backbones/resnet.py (90%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/mmcls/models/backbones/resnet_cifar.py (97%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/mmcls/models/backbones/resnext.py (99%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/mmcls/models/backbones/seresnet.py (98%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/mmcls/models/backbones/seresnext.py (99%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/mmcls/models/backbones/shufflenet_v1.py (95%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/mmcls/models/backbones/shufflenet_v2.py (92%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/backbones/swin_transformer.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/backbones/swin_transformer_v2.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/backbones/t2t_vit.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/backbones/timm_backbone.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/backbones/tnt.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/backbones/twins.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/backbones/van.py
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/mmcls/models/backbones/vgg.py (91%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/backbones/vision_transformer.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/builder.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/classifiers/__init__.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/classifiers/base.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/classifiers/image.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/heads/__init__.py
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/mmcls/models/heads/base_head.py (86%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/heads/cls_head.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/heads/conformer_head.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/heads/deit_head.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/heads/efficientformer_head.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/heads/linear_head.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/heads/multi_label_csra_head.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/heads/multi_label_head.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/heads/multi_label_linear_head.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/heads/stacked_head.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/heads/vision_transformer_head.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/losses/__init__.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/losses/accuracy.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/losses/asymmetric_loss.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/losses/cross_entropy_loss.py
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/mmcls/models/losses/focal_loss.py (84%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/mmcls/models/losses/label_smooth_loss.py (81%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/losses/seesaw_loss.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/losses/utils.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/necks/__init__.py
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/mmcls/models/necks/gap.py (96%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/necks/gem.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/necks/hr_fuse.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/utils/__init__.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/utils/attention.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/utils/augment/__init__.py
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/mmcls/models/utils/augment/augments.py (98%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/utils/augment/builder.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/utils/augment/cutmix.py
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/mmcls/models/utils/augment/identity.py (83%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/utils/augment/mixup.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/utils/augment/resizemix.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/utils/augment/utils.py
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/mmcls/models/utils/channel_shuffle.py (94%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/utils/embed.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/utils/helpers.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/utils/inverted_residual.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/utils/layer_scale.py
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/mmcls/models/utils/make_divisible.py (95%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/utils/position_encoding.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/models/utils/se_layer.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/utils/__init__.py
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/mmcls/utils/collect_env.py (89%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/utils/device.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/utils/distribution.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/utils/logger.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/mmcls/utils/setup_env.py
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/mmcls/version.py (91%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/model-index.yml
create mode 100644 openmmlab_test/mmclassification-0.24.1/mult_test.sh
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/requirements.txt (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/requirements/docs.txt
create mode 100644 openmmlab_test/mmclassification-0.24.1/requirements/mminstall.txt
create mode 100644 openmmlab_test/mmclassification-0.24.1/requirements/optional.txt
create mode 100644 openmmlab_test/mmclassification-0.24.1/requirements/readthedocs.txt
create mode 100644 openmmlab_test/mmclassification-0.24.1/requirements/runtime.txt
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/requirements/tests.txt (92%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/resources/mmcls-logo.png
create mode 100644 openmmlab_test/mmclassification-0.24.1/setup.cfg
create mode 100644 openmmlab_test/mmclassification-0.24.1/setup.py
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/sing_test.sh (77%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/single_process.sh
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/tests/data/color.jpg (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/data/dataset/a/1.JPG
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/data/dataset/ann.txt
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/data/dataset/b/2.jpeg
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/data/dataset/b/subb/3.jpg
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/data/dataset/classes.txt
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/tests/data/gray.jpg (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/data/retinanet.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/data/test.logjson
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_data/test_builder.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_data/test_datasets/test_common.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_data/test_datasets/test_dataset_utils.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_data/test_datasets/test_dataset_wrapper.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_data/test_datasets/test_sampler.py
rename openmmlab_test/{mmclassification-speed-benchmark/tests => mmclassification-0.24.1/tests/test_data}/test_pipelines/test_auto_augment.py (92%)
rename openmmlab_test/{mmclassification-speed-benchmark/tests => mmclassification-0.24.1/tests/test_data}/test_pipelines/test_loading.py (94%)
rename openmmlab_test/{mmclassification-speed-benchmark/tests => mmclassification-0.24.1/tests/test_data}/test_pipelines/test_transform.py (89%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_downstream/test_mmdet_inference.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_metrics/test_losses.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_metrics/test_metrics.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_metrics/test_utils.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_models/test_backbones/__init__.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_models/test_backbones/test_conformer.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_models/test_backbones/test_convmixer.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_models/test_backbones/test_convnext.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_models/test_backbones/test_cspnet.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_models/test_backbones/test_deit.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_models/test_backbones/test_densenet.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_models/test_backbones/test_efficientformer.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_models/test_backbones/test_efficientnet.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_models/test_backbones/test_hornet.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_models/test_backbones/test_hrnet.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_models/test_backbones/test_mlp_mixer.py
rename openmmlab_test/{mmclassification-speed-benchmark/tests => mmclassification-0.24.1/tests/test_models}/test_backbones/test_mobilenet_v2.py (97%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_models/test_backbones/test_mobilenet_v3.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_models/test_backbones/test_mvit.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_models/test_backbones/test_poolformer.py
rename openmmlab_test/{mmclassification-speed-benchmark/tests => mmclassification-0.24.1/tests/test_models}/test_backbones/test_regnet.py (90%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_models/test_backbones/test_repmlp.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_models/test_backbones/test_repvgg.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_models/test_backbones/test_res2net.py
rename openmmlab_test/{mmclassification-speed-benchmark/tests => mmclassification-0.24.1/tests/test_models}/test_backbones/test_resnest.py (96%)
rename openmmlab_test/{mmclassification-speed-benchmark/tests => mmclassification-0.24.1/tests/test_models}/test_backbones/test_resnet.py (91%)
rename openmmlab_test/{mmclassification-speed-benchmark/tests => mmclassification-0.24.1/tests/test_models}/test_backbones/test_resnet_cifar.py (97%)
rename openmmlab_test/{mmclassification-speed-benchmark/tests => mmclassification-0.24.1/tests/test_models}/test_backbones/test_resnext.py (93%)
rename openmmlab_test/{mmclassification-speed-benchmark/tests => mmclassification-0.24.1/tests/test_models}/test_backbones/test_seresnet.py (98%)
rename openmmlab_test/{mmclassification-speed-benchmark/tests => mmclassification-0.24.1/tests/test_models}/test_backbones/test_seresnext.py (94%)
rename openmmlab_test/{mmclassification-speed-benchmark/tests => mmclassification-0.24.1/tests/test_models}/test_backbones/test_shufflenet_v1.py (97%)
rename openmmlab_test/{mmclassification-speed-benchmark/tests => mmclassification-0.24.1/tests/test_models}/test_backbones/test_shufflenet_v2.py (97%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_models/test_backbones/test_swin_transformer.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_models/test_backbones/test_swin_transformer_v2.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_models/test_backbones/test_t2t_vit.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_models/test_backbones/test_timm_backbone.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_models/test_backbones/test_tnt.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_models/test_backbones/test_twins.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_models/test_backbones/test_van.py
rename openmmlab_test/{mmclassification-speed-benchmark/tests => mmclassification-0.24.1/tests/test_models}/test_backbones/test_vgg.py (95%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_models/test_backbones/test_vision_transformer.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_models/test_backbones/utils.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_models/test_classifiers.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_models/test_heads.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_models/test_neck.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_models/test_utils/test_attention.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_models/test_utils/test_augment.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_models/test_utils/test_embed.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_models/test_utils/test_inverted_residual.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_models/test_utils/test_layer_scale.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_models/test_utils/test_misc.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_models/test_utils/test_position_encoding.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_models/test_utils/test_se.py
rename openmmlab_test/{mmclassification-speed-benchmark/tests => mmclassification-0.24.1/tests/test_runtime}/test_eval_hook.py (89%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_runtime/test_hooks.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_runtime/test_num_class_hook.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_runtime/test_optimizer.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_runtime/test_preciseBN_hook.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_utils/test_device.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_utils/test_logger.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_utils/test_setup_env.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_utils/test_version_utils.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tests/test_utils/test_visualization.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tools/analysis_tools/analyze_logs.py
rename openmmlab_test/{mmclassification-speed-benchmark/tools => mmclassification-0.24.1/tools/analysis_tools}/analyze_results.py (75%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/tools/analysis_tools/eval_metric.py
rename openmmlab_test/{mmclassification-speed-benchmark/tools => mmclassification-0.24.1/tools/analysis_tools}/get_flops.py (91%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/tools/convert_models/efficientnet_to_mmcls.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tools/convert_models/hornet2mmcls.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tools/convert_models/mlpmixer_to_mmcls.py
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/tools/convert_models/mobilenetv2_to_mmcls.py (98%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/tools/convert_models/publish_model.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tools/convert_models/reparameterize_model.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tools/convert_models/reparameterize_repvgg.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tools/convert_models/repvgg_to_mmcls.py
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/tools/convert_models/shufflenetv2_to_mmcls.py (98%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/tools/convert_models/torchvision_to_mmcls.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tools/convert_models/twins2mmcls.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tools/convert_models/van2mmcls.py
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/tools/convert_models/vgg_to_mmcls.py (98%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/tools/deployment/mmcls2torchserve.py (98%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/tools/deployment/mmcls_handler.py (97%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/tools/deployment/onnx2tensorrt.py (86%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/tools/deployment/pytorch2mlmodel.py
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/tools/deployment/pytorch2onnx.py (81%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/tools/deployment/pytorch2torchscript.py (97%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/tools/deployment/test.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tools/deployment/test_torchserver.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tools/dist_test.sh
create mode 100644 openmmlab_test/mmclassification-0.24.1/tools/dist_train.sh
create mode 100644 openmmlab_test/mmclassification-0.24.1/tools/kfold-cross-valid.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tools/misc/print_config.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tools/misc/verify_dataset.py
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/tools/slurm_test.sh (100%)
rename openmmlab_test/{mmclassification-speed-benchmark => mmclassification-0.24.1}/tools/slurm_train.sh (100%)
create mode 100644 openmmlab_test/mmclassification-0.24.1/tools/test.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tools/train.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tools/visualizations/vis_cam.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tools/visualizations/vis_lr.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/tools/visualizations/vis_pipeline.py
create mode 100644 openmmlab_test/mmclassification-0.24.1/train.md
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/.github/CONTRIBUTING.md
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/.github/workflows/build.yml
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/.github/workflows/deploy.yml
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/.gitignore
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/.idea/.gitignore
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/.idea/.name
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/.idea/inspectionProfiles/profiles_settings.xml
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/.idea/mmclassification-speed-benchmark.iml
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/.idea/modules.xml
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/.idea/vcs.xml
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/.pre-commit-config.yaml
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/.readthedocs.yml
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/MANIFEST.in
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/README.md
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/README_zh-CN.md
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/_base_/datasets/imagenet_bs32.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/AlexNet_1x.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnest101.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnest200.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnest269.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnest50.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/vit_base_patch16_224_finetune.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/vit_base_patch16_224_pretrain.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/vit_base_patch16_384_finetune.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/vit_base_patch32_384_finetune.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/vit_large_patch16_224_finetune.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/vit_large_patch16_384_finetune.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/vit_large_patch32_384_finetune.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/_base_/schedules/imagenet_bs4096_AdamW.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/fp16/README.md
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/fp16/metafile.yml
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/fp16/resnet152_b32x8_fp16_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/fp16/resnet18_b32x8_fp16_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/fp16/resnet34_b32x8_fp16_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/fp16/resnet50_b32x8_fp16_dynamic_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/fp16/resnet50_b32x8_fp16_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/fp16/resnext50_32x4d_b32x8_fp16_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/fp16/seresnet50_b32x8_fp16_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/fp16/shufflenet_v1_1x_b64x16_linearlr_bn_nowd_fp16_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/fp16/shufflenet_v2_1x_b64x16_linearlr_bn_nowd_fp16_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/fp16/vgg11_b32x8_fp16_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/lenet/README.md
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/mobilenet_v2/README.md
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/mobilenet_v2/metafile.yml
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/mobilenet_v2/mobilenet_v2_b32x8_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/regnet/README.md
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/regnet/regnetx_1.6gf_b32x8_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/regnet/regnetx_12gf_b32x8_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/regnet/regnetx_3.2gf_b32x8_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/regnet/regnetx_4.0gf_b32x8_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/regnet/regnetx_400mf_b32x8_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/regnet/regnetx_6.4gf_b32x8_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/regnet/regnetx_8.0gf_b32x8_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/regnet/regnetx_800mf_b32x8_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/resnet/README.md
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/resnet/metafile.yml
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/resnet/resnet50_b32x8_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/resnet/resnet50_b64x32_warmup_label_smooth_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/resnext/README.md
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/resnext/metafile.yml
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/seresnet/README.md
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/seresnet/metafile.yml
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/seresnext/README.md
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/shufflenet_v1/README.md
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/shufflenet_v1/metafile.yml
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/shufflenet_v2/README.md
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/shufflenet_v2/metafile.yml
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/shufflenet_v2/shufflenet_v2_1x_b64x16_linearlr_bn_nowd_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/speed_test/AlexNet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/speed_test/datasets/imagenet_bs32.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/speed_test/datasets/imagenet_bs64.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/speed_test/mobilenet_v2_b32x8_fp16_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/speed_test/mobilenet_v2_b32x8_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/speed_test/resnet152_b32x8_fp16_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/speed_test/resnet152_b32x8_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/speed_test/resnet18_b32x8_fp16_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/speed_test/resnet18_b32x8_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/speed_test/resnet34_b32x8_fp16_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/speed_test/resnet34_b32x8_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/speed_test/resnet50_b32x8_fp16_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/speed_test/resnet50_b32x8_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/speed_test/resnext50_32x4d_b32x8_fp16_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/speed_test/resnext50_32x4d_b32x8_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/speed_test/seresnet50_b32x8_fp16_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/speed_test/seresnet50_b32x8_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/speed_test/shufflenet_v1_1x_b64x16_linearlr_bn_nowd_fp16_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/speed_test/shufflenet_v1_1x_b64x16_linearlr_bn_nowd_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/speed_test/shufflenet_v2_1x_b64x16_linearlr_bn_nowd_fp16_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/speed_test/shufflenet_v2_1x_b64x16_linearlr_bn_nowd_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/speed_test/vgg11_b32x8_fp16_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/speed_test/vgg11_b32x8_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/vgg/README.md
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/vgg/metafile.yml
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/vgg/vgg16_b32x8_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/vision_transformer/vit_base_patch16_224_finetune_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/vision_transformer/vit_base_patch16_224_pretrain_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/vision_transformer/vit_base_patch16_384_finetune_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/vision_transformer/vit_base_patch32_384_finetune_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/vision_transformer/vit_large_patch16_224_finetune_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/vision_transformer/vit_large_patch16_384_finetune_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/configs/vision_transformer/vit_large_patch32_384_finetune_imagenet.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/demo/image_demo.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/docker/serve/Dockerfile
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/docs/changelog.md
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/docs/conf.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/docs/getting_started.md
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/docs/imgs/qq_group_qrcode.jpg
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/docs/index.rst
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/docs/install.md
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/docs/model_zoo.md
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/docs/stat.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/docs/switch_language.md
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/docs/tutorials/MMClassification_Tutorial.ipynb
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/docs/tutorials/data_pipeline.md
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/docs/tutorials/finetune.md
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/docs/tutorials/model_serving.md
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/docs/tutorials/new_dataset.md
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/docs/tutorials/new_modules.md
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/docs/tutorials/onnx2tensorrt.md
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/docs/tutorials/pytorch2onnx.md
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/docs/tutorials/pytorch2torchscript.md
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/docs_zh-CN/conf.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/docs_zh-CN/getting_started.md
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/docs_zh-CN/index.rst
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/docs_zh-CN/install.md
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/docs_zh-CN/stat.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/docs_zh-CN/switch_language.md
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/docs_zh-CN/tutorials/MMClassification_Tutorial.ipynb
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/docs_zh-CN/tutorials/data_pipeline.md
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/docs_zh-CN/tutorials/finetune.md
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/docs_zh-CN/tutorials/new_dataset.md
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/docs_zh-CN/tutorials/new_modules.md
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/docs_zh-CN/tutorials/onnx2tensorrt.md
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/docs_zh-CN/tutorials/pytorch2onnx.md
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/image/train/1659061854685.png
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/image/train/1659062180839.png
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/image/train/1659064222206.png
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/image/train/1659064427635.png
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/image/train/1659064905610.png
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/image/train/1659064925468.png
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/image/train/1659065333529.png
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/image/train/1659065659769.png
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/image/train/1659065746317.png
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/image/train/1659066120939.png
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/image/train/1659067079718.png
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/images/1657694041240.png
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/images/1657694072163.png
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/images/2022-07-13-14-06-28.png
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/images/2022-07-13-14-07-43.png
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls.egg-info/PKG-INFO
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls.egg-info/SOURCES.txt
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls.egg-info/dependency_links.txt
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls.egg-info/not-zip-safe
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls.egg-info/requires.txt
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls.egg-info/top_level.txt
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/__init__.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/__pycache__/__init__.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/__pycache__/version.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/apis/__init__.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/apis/__pycache__/__init__.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/apis/__pycache__/inference.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/apis/__pycache__/test.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/apis/__pycache__/train.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/apis/test.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/apis/train.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/core/__init__.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/core/__pycache__/__init__.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/core/evaluation/__init__.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/core/evaluation/__pycache__/__init__.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/core/evaluation/__pycache__/eval_hooks.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/core/evaluation/__pycache__/eval_metrics.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/core/evaluation/__pycache__/mean_ap.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/core/evaluation/__pycache__/multilabel_eval_metrics.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/core/evaluation/eval_hooks.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/core/evaluation/eval_metrics.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/core/export/__init__.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/core/export/test.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/core/fp16/__init__.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/core/fp16/__pycache__/__init__.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/core/fp16/__pycache__/decorators.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/core/fp16/__pycache__/hooks.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/core/fp16/__pycache__/utils.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/core/fp16/decorators.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/core/fp16/hooks.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/core/fp16/utils.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/core/utils/__init__.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/core/utils/__pycache__/__init__.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/core/utils/__pycache__/dist_utils.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/core/utils/__pycache__/misc.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/core/utils/dist_utils.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/datasets/__init__.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/datasets/__pycache__/__init__.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/datasets/__pycache__/base_dataset.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/datasets/__pycache__/builder.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/datasets/__pycache__/cifar.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/datasets/__pycache__/dataset_wrappers.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/datasets/__pycache__/dummy.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/datasets/__pycache__/imagenet.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/datasets/__pycache__/mnist.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/datasets/__pycache__/multi_label.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/datasets/__pycache__/utils.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/datasets/__pycache__/voc.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/datasets/builder.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/datasets/dataset_wrappers.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/datasets/dummy.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/datasets/pipelines/__init__.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/datasets/pipelines/__pycache__/__init__.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/datasets/pipelines/__pycache__/auto_augment.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/datasets/pipelines/__pycache__/compose.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/datasets/pipelines/__pycache__/formating.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/datasets/pipelines/__pycache__/loading.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/datasets/pipelines/__pycache__/transforms.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/datasets/pipelines/formating.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/datasets/samplers/__init__.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/datasets/samplers/__pycache__/__init__.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/datasets/samplers/__pycache__/distributed_sampler.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/datasets/samplers/distributed_sampler.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/datasets/utils.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/datasets/voc.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/__init__.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/__pycache__/__init__.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/__pycache__/builder.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/backbones/__init__.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/backbones/__pycache__/__init__.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/backbones/__pycache__/alexnet.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/backbones/__pycache__/base_backbone.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/backbones/__pycache__/lenet.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/backbones/__pycache__/mobilenet_v2.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/backbones/__pycache__/mobilenet_v3.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/backbones/__pycache__/regnet.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/backbones/__pycache__/resnest.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/backbones/__pycache__/resnet.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/backbones/__pycache__/resnet_cifar.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/backbones/__pycache__/resnext.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/backbones/__pycache__/seresnet.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/backbones/__pycache__/seresnext.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/backbones/__pycache__/shufflenet_v1.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/backbones/__pycache__/shufflenet_v2.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/backbones/__pycache__/vgg.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/backbones/__pycache__/vision_transformer.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/backbones/mobilenet_v3.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/backbones/vision_transformer.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/builder.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/classifiers/__init__.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/classifiers/__pycache__/__init__.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/classifiers/__pycache__/base.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/classifiers/__pycache__/image.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/classifiers/base.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/classifiers/image.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/heads/__init__.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/heads/__pycache__/__init__.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/heads/__pycache__/base_head.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/heads/__pycache__/cls_head.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/heads/__pycache__/linear_head.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/heads/__pycache__/multi_label_head.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/heads/__pycache__/multi_label_linear_head.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/heads/__pycache__/vision_transformer_head.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/heads/cls_head.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/heads/linear_head.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/heads/multi_label_head.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/heads/multi_label_linear_head.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/heads/vision_transformer_head.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/losses/__init__.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/losses/__pycache__/__init__.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/losses/__pycache__/accuracy.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/losses/__pycache__/asymmetric_loss.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/losses/__pycache__/cross_entropy_loss.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/losses/__pycache__/focal_loss.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/losses/__pycache__/label_smooth_loss.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/losses/__pycache__/utils.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/losses/accuracy.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/losses/asymmetric_loss.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/losses/cross_entropy_loss.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/losses/utils.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/necks/__init__.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/necks/__pycache__/__init__.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/necks/__pycache__/gap.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/utils/__init__.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/utils/__pycache__/__init__.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/utils/__pycache__/channel_shuffle.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/utils/__pycache__/helpers.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/utils/__pycache__/inverted_residual.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/utils/__pycache__/make_divisible.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/utils/__pycache__/se_layer.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/utils/augment/__init__.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/utils/augment/__pycache__/__init__.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/utils/augment/__pycache__/augments.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/utils/augment/__pycache__/builder.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/utils/augment/__pycache__/cutmix.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/utils/augment/__pycache__/identity.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/utils/augment/__pycache__/mixup.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/utils/augment/builder.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/utils/augment/cutmix.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/utils/augment/mixup.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/utils/helpers.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/utils/inverted_residual.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/models/utils/se_layer.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/utils/__init__.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/utils/__pycache__/__init__.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/utils/__pycache__/collect_env.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/utils/__pycache__/logger.cpython-36.pyc
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/mmcls/utils/logger.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/model_zoo.yml
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/multi_test.sh
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/requirements/docs.txt
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/requirements/mminstall.txt
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/requirements/optional.txt
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/requirements/readthedocs.txt
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/requirements/runtime.txt
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/setup.cfg
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/setup.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/tests/test_backbones/test_mobilenet_v3.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/tests/test_backbones/test_utils.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/tests/test_backbones/test_vision_transformer.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/tests/test_classifiers.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/tests/test_dataset.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/tests/test_heads.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/tests/test_losses.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/tests/test_metrics.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/tests/test_neck.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/tools/benchmark_regression.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/tools/deployment/test.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/tools/dist_test.sh
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/tools/dist_train.sh
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/tools/publish_model.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/tools/test.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/tools/train.py
delete mode 100644 openmmlab_test/mmclassification-speed-benchmark/train.md
diff --git a/openmmlab_test/mmclassification-0.24.1/.gitattributes b/openmmlab_test/mmclassification-0.24.1/.gitattributes
new file mode 100644
index 00000000..f199a9ea
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/.gitattributes
@@ -0,0 +1,2 @@
+docs/** linguist-documentation
+docs_zh-CN/** linguist-documentation
diff --git a/openmmlab_test/mmclassification-0.24.1/.gitignore b/openmmlab_test/mmclassification-0.24.1/.gitignore
new file mode 100644
index 00000000..f6940c76
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/.gitignore
@@ -0,0 +1,134 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+**/*.pyc
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Auto-generated documentation
+docs/en/_build/
+docs/en/_model_zoo.rst
+docs/en/modelzoo_statistics.md
+docs/en/papers/
+docs/en/api/generated/
+docs/zh_CN/_build/
+docs/zh_CN/_model_zoo.rst
+docs/zh_CN/modelzoo_statistics.md
+docs/zh_CN/papers/
+docs/zh_CN/api/generated/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# pyenv
+.python-version
+
+# celery beat schedule file
+celerybeat-schedule
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+
+# custom
+/data
+.vscode
+.idea
+*.pkl
+*.pkl.json
+*.log.json
+/work_dirs
+/mmcls/.mim
+.DS_Store
+
+# Pytorch
+*.pth
+
+# IPU
+*.pvti
+*.pvti-journal
+/cache_engine
+/report
diff --git a/openmmlab_test/mmclassification-0.24.1/.pre-commit-config.yaml b/openmmlab_test/mmclassification-0.24.1/.pre-commit-config.yaml
new file mode 100644
index 00000000..0d19d5f6
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/.pre-commit-config.yaml
@@ -0,0 +1,58 @@
+exclude: ^tests/data/
+repos:
+ - repo: https://github.com/PyCQA/flake8
+ rev: 4.0.1
+ hooks:
+ - id: flake8
+ - repo: https://github.com/PyCQA/isort
+ rev: 5.10.1
+ hooks:
+ - id: isort
+ - repo: https://github.com/pre-commit/mirrors-yapf
+ rev: v0.30.0
+ hooks:
+ - id: yapf
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v3.1.0
+ hooks:
+ - id: trailing-whitespace
+ - id: check-yaml
+ - id: end-of-file-fixer
+ - id: requirements-txt-fixer
+ - id: double-quote-string-fixer
+ - id: check-merge-conflict
+ - id: fix-encoding-pragma
+ args: ["--remove"]
+ - id: mixed-line-ending
+ args: ["--fix=lf"]
+ - repo: https://github.com/executablebooks/mdformat
+ rev: 0.7.9
+ hooks:
+ - id: mdformat
+ args: ["--number", "--table-width", "200"]
+ additional_dependencies:
+ - mdformat-openmmlab
+ - mdformat_frontmatter
+ - linkify-it-py
+ - repo: https://github.com/codespell-project/codespell
+ rev: v2.1.0
+ hooks:
+ - id: codespell
+ - repo: https://github.com/myint/docformatter
+ rev: v1.3.1
+ hooks:
+ - id: docformatter
+ args: ["--in-place", "--wrap-descriptions", "79"]
+ - repo: https://github.com/open-mmlab/pre-commit-hooks
+ rev: v0.2.0
+ hooks:
+ - id: check-copyright
+ args: ["mmcls", "tests", "demo", "tools"]
+ # - repo: local
+ # hooks:
+ # - id: clang-format
+ # name: clang-format
+ # description: Format files with ClangFormat
+ # entry: clang-format -style=google -i
+ # language: system
+ # files: \.(c|cc|cxx|cpp|cu|h|hpp|hxx|cuh|proto)$
diff --git a/openmmlab_test/mmclassification-0.24.1/.readthedocs.yml b/openmmlab_test/mmclassification-0.24.1/.readthedocs.yml
new file mode 100644
index 00000000..6cfbf5d3
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/.readthedocs.yml
@@ -0,0 +1,9 @@
+version: 2
+
+formats: all
+
+python:
+ version: 3.7
+ install:
+ - requirements: requirements/docs.txt
+ - requirements: requirements/readthedocs.txt
diff --git a/openmmlab_test/mmclassification-0.24.1/CITATION.cff b/openmmlab_test/mmclassification-0.24.1/CITATION.cff
new file mode 100644
index 00000000..0c0d7730
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/CITATION.cff
@@ -0,0 +1,9 @@
+cff-version: 1.2.0
+message: "If you use this software, please cite it as below."
+title: "OpenMMLab's Image Classification Toolbox and Benchmark"
+authors:
+ - name: "MMClassification Contributors"
+version: 0.15.0
+date-released: 2020-07-09
+repository-code: "https://github.com/open-mmlab/mmclassification"
+license: Apache-2.0
diff --git a/openmmlab_test/mmclassification-0.24.1/CONTRIBUTING.md b/openmmlab_test/mmclassification-0.24.1/CONTRIBUTING.md
new file mode 100644
index 00000000..8a0c6329
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/CONTRIBUTING.md
@@ -0,0 +1,67 @@
+# Contributing to OpenMMLab
+
+All kinds of contributions are welcome, including but not limited to the following.
+
+- Fix typos or bugs
+- Add documentation or translate the documentation into other languages
+- Add new features and components
+
+## Workflow
+
+1. fork and pull the latest OpenMMLab repository (MMClassification)
+2. checkout a new branch (do not use master branch for PRs)
+3. commit your changes
+4. create a PR
+
+```{note}
+If you plan to add some new features that involve large changes, it is encouraged to open an issue for discussion first.
+```
+
+## Code style
+
+### Python
+
+We adopt [PEP8](https://www.python.org/dev/peps/pep-0008/) as the preferred code style.
+
+We use the following tools for linting and formatting:
+
+- [flake8](https://github.com/PyCQA/flake8): A wrapper around some linter tools.
+- [isort](https://github.com/timothycrosley/isort): A Python utility to sort imports.
+- [yapf](https://github.com/google/yapf): A formatter for Python files.
+- [codespell](https://github.com/codespell-project/codespell): A Python utility to fix common misspellings in text files.
+- [mdformat](https://github.com/executablebooks/mdformat): Mdformat is an opinionated Markdown formatter that can be used to enforce a consistent style in Markdown files.
+- [docformatter](https://github.com/myint/docformatter): A formatter to format docstring.
+
+Style configurations can be found in [setup.cfg](./setup.cfg).
+
+We use a [pre-commit hook](https://pre-commit.com/) that checks and formats with `flake8`, `yapf`, `isort`, `trailing-whitespace` and `mdformat`,
+fixes `end-of-file`, `double-quote-string`, `fix-encoding-pragma` and `mixed-line-ending` issues, and sorts `requirements.txt` automatically on every commit.
+The config for a pre-commit hook is stored in [.pre-commit-config](https://github.com/open-mmlab/mmclassification/blob/master/.pre-commit-config.yaml).
+
+After you clone the repository, you will need to install and initialize the pre-commit hook.
+
+```shell
+pip install -U pre-commit
+```
+
+From the repository folder, run
+
+```shell
+pre-commit install
+```
+
+After this, the code linters and formatter checks will be enforced on every commit.
+
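+To run all hooks manually over the whole repository (useful before the first commit on a branch), `pre-commit` can also be invoked on demand:
+
+```shell
+pre-commit run --all-files
+```
+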
+```{important}
+Before you create a PR, make sure that your code lints and is formatted by yapf.
+```
+
+### C++ and CUDA
+
+We follow the [Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html).
diff --git a/openmmlab_test/mmclassification-speed-benchmark/LICENSE b/openmmlab_test/mmclassification-0.24.1/LICENSE
similarity index 99%
rename from openmmlab_test/mmclassification-speed-benchmark/LICENSE
rename to openmmlab_test/mmclassification-0.24.1/LICENSE
index 9c478002..f731325b 100644
--- a/openmmlab_test/mmclassification-speed-benchmark/LICENSE
+++ b/openmmlab_test/mmclassification-0.24.1/LICENSE
@@ -1,4 +1,4 @@
-Copyright 2020 MMClassification Authors. All rights reserved.
+Copyright (c) OpenMMLab. All rights reserved
Apache License
Version 2.0, January 2004
diff --git a/openmmlab_test/mmclassification-0.24.1/MANIFEST.in b/openmmlab_test/mmclassification-0.24.1/MANIFEST.in
new file mode 100644
index 00000000..17ddc8c7
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/MANIFEST.in
@@ -0,0 +1,4 @@
+include requirements/*.txt
+include mmcls/.mim/model-index.yml
+recursive-include mmcls/.mim/configs *.py *.yml
+recursive-include mmcls/.mim/tools *.py *.sh
diff --git a/openmmlab_test/mmclassification-0.24.1/README.md b/openmmlab_test/mmclassification-0.24.1/README.md
new file mode 100644
index 00000000..9535854a
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/README.md
@@ -0,0 +1,224 @@
+
+
+
+
+
+
+
+
+[](https://pypi.org/project/mmcls)
+[](https://mmclassification.readthedocs.io/en/latest/)
+[](https://github.com/open-mmlab/mmclassification/actions)
+[](https://codecov.io/gh/open-mmlab/mmclassification)
+[](https://github.com/open-mmlab/mmclassification/blob/master/LICENSE)
+[](https://github.com/open-mmlab/mmclassification/issues)
+[](https://github.com/open-mmlab/mmclassification/issues)
+
+[📘 Documentation](https://mmclassification.readthedocs.io/en/latest/) |
+[🛠️ Installation](https://mmclassification.readthedocs.io/en/latest/install.html) |
+[👀 Model Zoo](https://mmclassification.readthedocs.io/en/latest/model_zoo.html) |
+[🆕 Update News](https://mmclassification.readthedocs.io/en/latest/changelog.html) |
+[🤔 Reporting Issues](https://github.com/open-mmlab/mmclassification/issues/new/choose)
+
+:point_right: **The MMClassification 1.0 branch is in trial; welcome everyone to [try it](https://github.com/open-mmlab/mmclassification/tree/1.x) and [discuss with us](https://github.com/open-mmlab/mmclassification/discussions)!** :point_left:
+
+
+
+## Introduction
+
+English | [简体中文](/README_zh-CN.md) | [Model testing methods and steps](train.md)
+
+MMClassification is an open source image classification toolbox based on PyTorch. It is
+a part of the [OpenMMLab](https://openmmlab.com/) project.
+
+The master branch works with **PyTorch 1.5+**.
+
+
+
+
+
+
+### Major features
+
+- Various backbones and pretrained models
+- Bag of training tricks
+- Large-scale training configs
+- High efficiency and extensibility
+- Powerful toolkits
+
+## What's new
+
+MMClassification 1.0 has been released! It is still unstable and in the release-candidate stage. If you want to try it, go
+to [the 1.x branch](https://github.com/open-mmlab/mmclassification/tree/1.x) and discuss it with us in
+[the discussion](https://github.com/open-mmlab/mmclassification/discussions).
+
+v0.24.1 was released on 31/10/2022.
+Highlights of the new version:
+
+- Support HUAWEI Ascend devices.
+
+v0.24.0 was released on 30/9/2022.
+Highlights of the new version:
+
+- Support **HorNet**, **EfficientFormer**, **SwinTransformer V2** and **MViT** backbones.
+- Support the Stanford Cars dataset.
+
+v0.23.0 was released on 1/5/2022.
+Highlights of the new version:
+
+- Support **DenseNet**, **VAN** and **PoolFormer**, and provide pre-trained models.
+- Support training on IPU.
+- New-style API docs; welcome to [view them](https://mmclassification.readthedocs.io/en/master/api/models.html).
+
+Please refer to [changelog.md](docs/en/changelog.md) for more details and other release history.
+
+## Installation
+
+Below are quick steps for installation:
+
+```shell
+conda create -n open-mmlab python=3.8 pytorch=1.10 cudatoolkit=11.3 torchvision==0.11.0 -c pytorch -y
+conda activate open-mmlab
+pip3 install openmim
+mim install mmcv-full
+git clone https://github.com/open-mmlab/mmclassification.git
+cd mmclassification
+pip3 install -e .
+```
+
+Please refer to [install.md](https://mmclassification.readthedocs.io/en/latest/install.html) for more detailed installation and dataset preparation.
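+
+To quickly verify the installation, the minimal sketch below runs single-image inference with the bundled demo script; the config name is illustrative and the checkpoint path is a placeholder, so substitute any pair from the [model zoo](https://mmclassification.readthedocs.io/en/latest/model_zoo.html):
+
+```shell
+# classify the demo image on CPU with a config/checkpoint pair from the model zoo
+python demo/image_demo.py demo/demo.JPEG configs/resnet/resnet50_8xb32_in1k.py ${CHECKPOINT_FILE} --device cpu
+```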
+
+## Getting Started
+
+Please see [Getting Started](https://mmclassification.readthedocs.io/en/latest/getting_started.html) for the basic usage of MMClassification. There are also tutorials:
+
+- [Learn about Configs](https://mmclassification.readthedocs.io/en/latest/tutorials/config.html)
+- [Fine-tune Models](https://mmclassification.readthedocs.io/en/latest/tutorials/finetune.html)
+- [Add New Dataset](https://mmclassification.readthedocs.io/en/latest/tutorials/new_dataset.html)
+- [Customize Data Pipeline](https://mmclassification.readthedocs.io/en/latest/tutorials/data_pipeline.html)
+- [Add New Modules](https://mmclassification.readthedocs.io/en/latest/tutorials/new_modules.html)
+- [Customize Schedule](https://mmclassification.readthedocs.io/en/latest/tutorials/schedule.html)
+- [Customize Runtime Settings](https://mmclassification.readthedocs.io/en/latest/tutorials/runtime.html)
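+
+As a minimal command-line sketch (assuming the environment above and an ImageNet-style dataset under `data/imagenet`; the config name and default `work_dirs` output path are illustrative), training and testing use the usual `tools/` entry points:
+
+```shell
+# single-GPU training with a config from this repository
+python tools/train.py configs/resnet/resnet50_8xb32_in1k.py
+
+# evaluate a trained checkpoint
+python tools/test.py configs/resnet/resnet50_8xb32_in1k.py work_dirs/resnet50_8xb32_in1k/latest.pth --metrics accuracy
+```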
+
+Colab tutorials are also provided:
+
+- Learn about MMClassification **Python API**: [Preview the notebook](https://github.com/open-mmlab/mmclassification/blob/master/docs/en/tutorials/MMClassification_python.ipynb) or directly [run on Colab](https://colab.research.google.com/github/open-mmlab/mmclassification/blob/master/docs/en/tutorials/MMClassification_python.ipynb).
+- Learn about MMClassification **CLI tools**: [Preview the notebook](https://github.com/open-mmlab/mmclassification/blob/master/docs/en/tutorials/MMClassification_tools.ipynb) or directly [run on Colab](https://colab.research.google.com/github/open-mmlab/mmclassification/blob/master/docs/en/tutorials/MMClassification_tools.ipynb).
+
+## Model zoo
+
+Results and models are available in the [model zoo](https://mmclassification.readthedocs.io/en/latest/model_zoo.html).
+
+
+Supported backbones
+
+- [x] [VGG](https://github.com/open-mmlab/mmclassification/tree/master/configs/vgg)
+- [x] [ResNet](https://github.com/open-mmlab/mmclassification/tree/master/configs/resnet)
+- [x] [ResNeXt](https://github.com/open-mmlab/mmclassification/tree/master/configs/resnext)
+- [x] [SE-ResNet](https://github.com/open-mmlab/mmclassification/tree/master/configs/seresnet)
+- [x] [SE-ResNeXt](https://github.com/open-mmlab/mmclassification/tree/master/configs/seresnet)
+- [x] [RegNet](https://github.com/open-mmlab/mmclassification/tree/master/configs/regnet)
+- [x] [ShuffleNetV1](https://github.com/open-mmlab/mmclassification/tree/master/configs/shufflenet_v1)
+- [x] [ShuffleNetV2](https://github.com/open-mmlab/mmclassification/tree/master/configs/shufflenet_v2)
+- [x] [MobileNetV2](https://github.com/open-mmlab/mmclassification/tree/master/configs/mobilenet_v2)
+- [x] [MobileNetV3](https://github.com/open-mmlab/mmclassification/tree/master/configs/mobilenet_v3)
+- [x] [Swin-Transformer](https://github.com/open-mmlab/mmclassification/tree/master/configs/swin_transformer)
+- [x] [RepVGG](https://github.com/open-mmlab/mmclassification/tree/master/configs/repvgg)
+- [x] [Vision-Transformer](https://github.com/open-mmlab/mmclassification/tree/master/configs/vision_transformer)
+- [x] [Transformer-in-Transformer](https://github.com/open-mmlab/mmclassification/tree/master/configs/tnt)
+- [x] [Res2Net](https://github.com/open-mmlab/mmclassification/tree/master/configs/res2net)
+- [x] [MLP-Mixer](https://github.com/open-mmlab/mmclassification/tree/master/configs/mlp_mixer)
+- [x] [DeiT](https://github.com/open-mmlab/mmclassification/tree/master/configs/deit)
+- [x] [Conformer](https://github.com/open-mmlab/mmclassification/tree/master/configs/conformer)
+- [x] [T2T-ViT](https://github.com/open-mmlab/mmclassification/tree/master/configs/t2t_vit)
+- [x] [Twins](https://github.com/open-mmlab/mmclassification/tree/master/configs/twins)
+- [x] [EfficientNet](https://github.com/open-mmlab/mmclassification/tree/master/configs/efficientnet)
+- [x] [ConvNeXt](https://github.com/open-mmlab/mmclassification/tree/master/configs/convnext)
+- [x] [HRNet](https://github.com/open-mmlab/mmclassification/tree/master/configs/hrnet)
+- [x] [VAN](https://github.com/open-mmlab/mmclassification/tree/master/configs/van)
+- [x] [ConvMixer](https://github.com/open-mmlab/mmclassification/tree/master/configs/convmixer)
+- [x] [CSPNet](https://github.com/open-mmlab/mmclassification/tree/master/configs/cspnet)
+- [x] [PoolFormer](https://github.com/open-mmlab/mmclassification/tree/master/configs/poolformer)
+- [x] [MViT](https://github.com/open-mmlab/mmclassification/tree/master/configs/mvit)
+- [x] [EfficientFormer](https://github.com/open-mmlab/mmclassification/tree/master/configs/efficientformer)
+- [x] [HorNet](https://github.com/open-mmlab/mmclassification/tree/master/configs/hornet)
+
+
+
+## Contributing
+
+We appreciate all contributions to improve MMClassification.
+Please refer to [CONTRIBUTING.md](https://mmclassification.readthedocs.io/en/latest/community/CONTRIBUTING.html) for the contributing guidelines.
+
+## Acknowledgement
+
+MMClassification is an open source project contributed by researchers and engineers from various colleges and companies. We appreciate all the contributors who implement their methods or add new features, as well as users who give valuable feedback.
+We hope the toolbox and benchmark can serve the growing research community by providing a flexible toolkit to reimplement existing methods and develop new classifiers.
+
+## Citation
+
+If you find this project useful in your research, please consider citing:
+
+```BibTeX
+@misc{2020mmclassification,
+ title={OpenMMLab's Image Classification Toolbox and Benchmark},
+ author={MMClassification Contributors},
+ howpublished = {\url{https://github.com/open-mmlab/mmclassification}},
+ year={2020}
+}
+```
+
+## License
+
+This project is released under the [Apache 2.0 license](LICENSE).
+
+## Projects in OpenMMLab
+
+- [MMCV](https://github.com/open-mmlab/mmcv): OpenMMLab foundational library for computer vision.
+- [MIM](https://github.com/open-mmlab/mim): MIM installs OpenMMLab packages.
+- [MMClassification](https://github.com/open-mmlab/mmclassification): OpenMMLab image classification toolbox and benchmark.
+- [MMDetection](https://github.com/open-mmlab/mmdetection): OpenMMLab detection toolbox and benchmark.
+- [MMDetection3D](https://github.com/open-mmlab/mmdetection3d): OpenMMLab's next-generation platform for general 3D object detection.
+- [MMRotate](https://github.com/open-mmlab/mmrotate): OpenMMLab rotated object detection toolbox and benchmark.
+- [MMSegmentation](https://github.com/open-mmlab/mmsegmentation): OpenMMLab semantic segmentation toolbox and benchmark.
+- [MMOCR](https://github.com/open-mmlab/mmocr): OpenMMLab text detection, recognition, and understanding toolbox.
+- [MMPose](https://github.com/open-mmlab/mmpose): OpenMMLab pose estimation toolbox and benchmark.
+- [MMHuman3D](https://github.com/open-mmlab/mmhuman3d): OpenMMLab 3D human parametric model toolbox and benchmark.
+- [MMSelfSup](https://github.com/open-mmlab/mmselfsup): OpenMMLab self-supervised learning toolbox and benchmark.
+- [MMRazor](https://github.com/open-mmlab/mmrazor): OpenMMLab model compression toolbox and benchmark.
+- [MMFewShot](https://github.com/open-mmlab/mmfewshot): OpenMMLab fewshot learning toolbox and benchmark.
+- [MMAction2](https://github.com/open-mmlab/mmaction2): OpenMMLab's next-generation action understanding toolbox and benchmark.
+- [MMTracking](https://github.com/open-mmlab/mmtracking): OpenMMLab video perception toolbox and benchmark.
+- [MMFlow](https://github.com/open-mmlab/mmflow): OpenMMLab optical flow toolbox and benchmark.
+- [MMEditing](https://github.com/open-mmlab/mmediting): OpenMMLab image and video editing toolbox.
+- [MMGeneration](https://github.com/open-mmlab/mmgeneration): OpenMMLab image and video generative models toolbox.
+- [MMDeploy](https://github.com/open-mmlab/mmdeploy): OpenMMLab model deployment framework.
diff --git a/openmmlab_test/mmclassification-0.24.1/README_zh-CN.md b/openmmlab_test/mmclassification-0.24.1/README_zh-CN.md
new file mode 100644
index 00000000..60f06209
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/README_zh-CN.md
@@ -0,0 +1,222 @@
+
+
+
+
+
+
+
+
+[](https://pypi.org/project/mmcls)
+[](https://mmclassification.readthedocs.io/zh_CN/latest/)
+[](https://github.com/open-mmlab/mmclassification/actions)
+[](https://codecov.io/gh/open-mmlab/mmclassification)
+[](https://github.com/open-mmlab/mmclassification/blob/master/LICENSE)
+[](https://github.com/open-mmlab/mmclassification/issues)
+[](https://github.com/open-mmlab/mmclassification/issues)
+
+[📘 中文文档](https://mmclassification.readthedocs.io/zh_CN/latest/) |
+[🛠️ 安装教程](https://mmclassification.readthedocs.io/zh_CN/latest/install.html) |
+[👀 模型库](https://mmclassification.readthedocs.io/zh_CN/latest/model_zoo.html) |
+[🆕 更新日志](https://mmclassification.readthedocs.io/en/latest/changelog.html) |
+[🤔 报告问题](https://github.com/open-mmlab/mmclassification/issues/new/choose)
+
+:point_right: **MMClassification 1.0 版本即将正式发布,欢迎大家 [试用](https://github.com/open-mmlab/mmclassification/tree/1.x) 并 [参与讨论](https://github.com/open-mmlab/mmclassification/discussions)!** :point_left:
+
+
+
+
+
+## Introduction
+
+[English](/README.md) | 简体中文
+
+MMClassification 是一款基于 PyTorch 的开源图像分类工具箱,是 [OpenMMLab](https://openmmlab.com/) 项目的成员之一
+
+主分支代码目前支持 PyTorch 1.5 以上的版本。
+
+
+
+
+
+
+### 主要特性
+
+- 支持多样的主干网络与预训练模型
+- 支持配置多种训练技巧
+- 大量的训练配置文件
+- 高效率和高可扩展性
+- 功能强大的工具箱
+
+## 更新日志
+
+MMClassification 1.0 已经发布!目前仍在公测中,如果希望试用,请切换到 [1.x 分支](https://github.com/open-mmlab/mmclassification/tree/1.x),并在[讨论版](https://github.com/open-mmlab/mmclassification/discussions) 参加开发讨论!
+
+2022/10/31 发布了 v0.24.1 版本
+
+- 支持了华为昇腾 NPU 设备。
+
+2022/9/30 发布了 v0.24.0 版本
+
+- 支持了 **HorNet**,**EfficientFormer**,**SwinTransformer V2**,**MViT** 等主干网络。
+- 支持了 Stanford Cars 数据集。
+
+2022/5/1 发布了 v0.23.0 版本
+
+- 支持了 **DenseNet**,**VAN** 和 **PoolFormer** 三个网络,并提供了预训练模型。
+- 支持在 IPU 上进行训练。
+- 更新了 API 文档的样式,更方便查阅,[欢迎查阅](https://mmclassification.readthedocs.io/en/master/api/models.html)。
+
+发布历史和更新细节请参考 [更新日志](docs/en/changelog.md)
+
+## 安装
+
+以下是安装的简要步骤:
+
+```shell
+conda create -n open-mmlab python=3.8 pytorch=1.10 cudatoolkit=11.3 torchvision==0.11.0 -c pytorch -y
+conda activate open-mmlab
+pip3 install openmim
+mim install mmcv-full
+git clone https://github.com/open-mmlab/mmclassification.git
+cd mmclassification
+pip3 install -e .
+```
+
+更详细的步骤请参考 [安装指南](https://mmclassification.readthedocs.io/zh_CN/latest/install.html) 进行安装。
+
+## 基础教程
+
+请参考 [基础教程](https://mmclassification.readthedocs.io/zh_CN/latest/getting_started.html) 来了解 MMClassification 的基本使用。MMClassification 也提供了其他更详细的教程:
+
+- [如何编写配置文件](https://mmclassification.readthedocs.io/zh_CN/latest/tutorials/config.html)
+- [如何微调模型](https://mmclassification.readthedocs.io/zh_CN/latest/tutorials/finetune.html)
+- [如何增加新数据集](https://mmclassification.readthedocs.io/zh_CN/latest/tutorials/new_dataset.html)
+- [如何设计数据处理流程](https://mmclassification.readthedocs.io/zh_CN/latest/tutorials/data_pipeline.html)
+- [如何增加新模块](https://mmclassification.readthedocs.io/zh_CN/latest/tutorials/new_modules.html)
+- [如何自定义优化策略](https://mmclassification.readthedocs.io/zh_CN/latest/tutorials/schedule.html)
+- [如何自定义运行参数](https://mmclassification.readthedocs.io/zh_CN/latest/tutorials/runtime.html)
+
+我们也提供了相应的中文 Colab 教程:
+
+- 了解 MMClassification **Python API**:[预览 Notebook](https://github.com/open-mmlab/mmclassification/blob/master/docs/zh_CN/tutorials/MMClassification_python_cn.ipynb) 或者直接[在 Colab 上运行](https://colab.research.google.com/github/open-mmlab/mmclassification/blob/master/docs/zh_CN/tutorials/MMClassification_python_cn.ipynb)。
+- 了解 MMClassification **命令行工具**:[预览 Notebook](https://github.com/open-mmlab/mmclassification/blob/master/docs/zh_CN/tutorials/MMClassification_tools_cn.ipynb) 或者直接[在 Colab 上运行](https://colab.research.google.com/github/open-mmlab/mmclassification/blob/master/docs/zh_CN/tutorials/MMClassification_tools_cn.ipynb)。
+
+## 模型库
+
+相关结果和模型可在 [model zoo](https://mmclassification.readthedocs.io/en/latest/model_zoo.html) 中获得
+
+
+支持的主干网络
+
+- [x] [VGG](https://github.com/open-mmlab/mmclassification/tree/master/configs/vgg)
+- [x] [ResNet](https://github.com/open-mmlab/mmclassification/tree/master/configs/resnet)
+- [x] [ResNeXt](https://github.com/open-mmlab/mmclassification/tree/master/configs/resnext)
+- [x] [SE-ResNet](https://github.com/open-mmlab/mmclassification/tree/master/configs/seresnet)
+- [x] [SE-ResNeXt](https://github.com/open-mmlab/mmclassification/tree/master/configs/seresnet)
+- [x] [RegNet](https://github.com/open-mmlab/mmclassification/tree/master/configs/regnet)
+- [x] [ShuffleNetV1](https://github.com/open-mmlab/mmclassification/tree/master/configs/shufflenet_v1)
+- [x] [ShuffleNetV2](https://github.com/open-mmlab/mmclassification/tree/master/configs/shufflenet_v2)
+- [x] [MobileNetV2](https://github.com/open-mmlab/mmclassification/tree/master/configs/mobilenet_v2)
+- [x] [MobileNetV3](https://github.com/open-mmlab/mmclassification/tree/master/configs/mobilenet_v3)
+- [x] [Swin-Transformer](https://github.com/open-mmlab/mmclassification/tree/master/configs/swin_transformer)
+- [x] [RepVGG](https://github.com/open-mmlab/mmclassification/tree/master/configs/repvgg)
+- [x] [Vision-Transformer](https://github.com/open-mmlab/mmclassification/tree/master/configs/vision_transformer)
+- [x] [Transformer-in-Transformer](https://github.com/open-mmlab/mmclassification/tree/master/configs/tnt)
+- [x] [Res2Net](https://github.com/open-mmlab/mmclassification/tree/master/configs/res2net)
+- [x] [MLP-Mixer](https://github.com/open-mmlab/mmclassification/tree/master/configs/mlp_mixer)
+- [x] [DeiT](https://github.com/open-mmlab/mmclassification/tree/master/configs/deit)
+- [x] [Conformer](https://github.com/open-mmlab/mmclassification/tree/master/configs/conformer)
+- [x] [T2T-ViT](https://github.com/open-mmlab/mmclassification/tree/master/configs/t2t_vit)
+- [x] [Twins](https://github.com/open-mmlab/mmclassification/tree/master/configs/twins)
+- [x] [EfficientNet](https://github.com/open-mmlab/mmclassification/tree/master/configs/efficientnet)
+- [x] [ConvNeXt](https://github.com/open-mmlab/mmclassification/tree/master/configs/convnext)
+- [x] [HRNet](https://github.com/open-mmlab/mmclassification/tree/master/configs/hrnet)
+- [x] [VAN](https://github.com/open-mmlab/mmclassification/tree/master/configs/van)
+- [x] [ConvMixer](https://github.com/open-mmlab/mmclassification/tree/master/configs/convmixer)
+- [x] [CSPNet](https://github.com/open-mmlab/mmclassification/tree/master/configs/cspnet)
+- [x] [PoolFormer](https://github.com/open-mmlab/mmclassification/tree/master/configs/poolformer)
+- [x] [MViT](https://github.com/open-mmlab/mmclassification/tree/master/configs/mvit)
+- [x] [EfficientFormer](https://github.com/open-mmlab/mmclassification/tree/master/configs/efficientformer)
+- [x] [HorNet](https://github.com/open-mmlab/mmclassification/tree/master/configs/hornet)
+
+
+
+## 参与贡献
+
+我们非常欢迎任何有助于提升 MMClassification 的贡献,请参考 [贡献指南](https://mmclassification.readthedocs.io/zh_CN/latest/community/CONTRIBUTING.html) 来了解如何参与贡献。
+
+## 致谢
+
+MMClassification 是一款由不同学校和公司共同贡献的开源项目。我们感谢所有为项目提供算法复现和新功能支持的贡献者,以及提供宝贵反馈的用户。
+
+我们希望该工具箱和基准测试可以为社区提供灵活的代码工具,供用户复现现有算法并开发自己的新模型,从而不断为开源社区提供贡献。
+
+## 引用
+
+如果你在研究中使用了本项目的代码或者性能基准,请参考如下 bibtex 引用 MMClassification。
+
+```BibTeX
+@misc{2020mmclassification,
+ title={OpenMMLab's Image Classification Toolbox and Benchmark},
+ author={MMClassification Contributors},
+ howpublished = {\url{https://github.com/open-mmlab/mmclassification}},
+ year={2020}
+}
+```
+
+## 许可证
+
+该项目基于 [Apache 2.0 license](LICENSE) 开源。
+
+## OpenMMLab 的其他项目
+
+- [MMCV](https://github.com/open-mmlab/mmcv): OpenMMLab 计算机视觉基础库
+- [MIM](https://github.com/open-mmlab/mim): MIM 是 OpenMMlab 项目、算法、模型的统一入口
+- [MMClassification](https://github.com/open-mmlab/mmclassification): OpenMMLab 图像分类工具箱
+- [MMDetection](https://github.com/open-mmlab/mmdetection): OpenMMLab 目标检测工具箱
+- [MMDetection3D](https://github.com/open-mmlab/mmdetection3d): OpenMMLab 新一代通用 3D 目标检测平台
+- [MMRotate](https://github.com/open-mmlab/mmrotate): OpenMMLab 旋转框检测工具箱与测试基准
+- [MMSegmentation](https://github.com/open-mmlab/mmsegmentation): OpenMMLab 语义分割工具箱
+- [MMOCR](https://github.com/open-mmlab/mmocr): OpenMMLab 全流程文字检测识别理解工具包
+- [MMPose](https://github.com/open-mmlab/mmpose): OpenMMLab 姿态估计工具箱
+- [MMHuman3D](https://github.com/open-mmlab/mmhuman3d): OpenMMLab 人体参数化模型工具箱与测试基准
+- [MMSelfSup](https://github.com/open-mmlab/mmselfsup): OpenMMLab 自监督学习工具箱与测试基准
+- [MMRazor](https://github.com/open-mmlab/mmrazor): OpenMMLab 模型压缩工具箱与测试基准
+- [MMFewShot](https://github.com/open-mmlab/mmfewshot): OpenMMLab 少样本学习工具箱与测试基准
+- [MMAction2](https://github.com/open-mmlab/mmaction2): OpenMMLab 新一代视频理解工具箱
+- [MMTracking](https://github.com/open-mmlab/mmtracking): OpenMMLab 一体化视频目标感知平台
+- [MMFlow](https://github.com/open-mmlab/mmflow): OpenMMLab 光流估计工具箱与测试基准
+- [MMEditing](https://github.com/open-mmlab/mmediting): OpenMMLab 图像视频编辑工具箱
+- [MMGeneration](https://github.com/open-mmlab/mmgeneration): OpenMMLab 图片视频生成模型工具箱
+- [MMDeploy](https://github.com/open-mmlab/mmdeploy): OpenMMLab 模型部署框架
+
+## 欢迎加入 OpenMMLab 社区
+
+扫描下方的二维码可关注 OpenMMLab 团队的 [知乎官方账号](https://www.zhihu.com/people/openmmlab),加入 OpenMMLab 团队的 [官方交流 QQ 群](https://jq.qq.com/?_wv=1027&k=aCvMxdr3) 或联络 OpenMMLab 官方微信小助手
+
+
+
+我们会在 OpenMMLab 社区为大家
+
+- 📢 分享 AI 框架的前沿核心技术
+- 💻 解读 PyTorch 常用模块源码
+- 📰 发布 OpenMMLab 的相关新闻
+- 🚀 介绍 OpenMMLab 开发的前沿算法
+- 🏃 获取更高效的问题答疑和意见反馈
+- 🔥 提供与各行各业开发者充分交流的平台
+
+干货满满 📘,等你来撩 💗,OpenMMLab 社区期待您的加入 👬
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/datasets/cifar100_bs16.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/cifar100_bs16.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/datasets/cifar100_bs16.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/cifar100_bs16.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/datasets/cifar10_bs16.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/cifar10_bs16.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/datasets/cifar10_bs16.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/cifar10_bs16.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/cub_bs8_384.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/cub_bs8_384.py
new file mode 100644
index 00000000..4acad24b
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/cub_bs8_384.py
@@ -0,0 +1,54 @@
+# dataset settings
+dataset_type = 'CUB'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='Resize', size=510),
+ dict(type='RandomCrop', size=384),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='Resize', size=510),
+ dict(type='CenterCrop', crop_size=384),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+
+data_root = 'data/CUB_200_2011/'
+data = dict(
+ samples_per_gpu=8,
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type,
+ ann_file=data_root + 'images.txt',
+ image_class_labels_file=data_root + 'image_class_labels.txt',
+ train_test_split_file=data_root + 'train_test_split.txt',
+ data_prefix=data_root + 'images',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ ann_file=data_root + 'images.txt',
+ image_class_labels_file=data_root + 'image_class_labels.txt',
+ train_test_split_file=data_root + 'train_test_split.txt',
+ data_prefix=data_root + 'images',
+ test_mode=True,
+ pipeline=test_pipeline),
+ test=dict(
+ type=dataset_type,
+ ann_file=data_root + 'images.txt',
+ image_class_labels_file=data_root + 'image_class_labels.txt',
+ train_test_split_file=data_root + 'train_test_split.txt',
+ data_prefix=data_root + 'images',
+ test_mode=True,
+ pipeline=test_pipeline))
+
+evaluation = dict(
+ interval=1, metric='accuracy',
+ save_best='auto') # save the checkpoint with highest accuracy
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/cub_bs8_448.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/cub_bs8_448.py
new file mode 100644
index 00000000..9e909a18
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/cub_bs8_448.py
@@ -0,0 +1,54 @@
+# dataset settings
+dataset_type = 'CUB'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='Resize', size=600),
+ dict(type='RandomCrop', size=448),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='Resize', size=600),
+ dict(type='CenterCrop', crop_size=448),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+
+data_root = 'data/CUB_200_2011/'
+data = dict(
+ samples_per_gpu=8,
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type,
+ ann_file=data_root + 'images.txt',
+ image_class_labels_file=data_root + 'image_class_labels.txt',
+ train_test_split_file=data_root + 'train_test_split.txt',
+ data_prefix=data_root + 'images',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ ann_file=data_root + 'images.txt',
+ image_class_labels_file=data_root + 'image_class_labels.txt',
+ train_test_split_file=data_root + 'train_test_split.txt',
+ data_prefix=data_root + 'images',
+ test_mode=True,
+ pipeline=test_pipeline),
+ test=dict(
+ type=dataset_type,
+ ann_file=data_root + 'images.txt',
+ image_class_labels_file=data_root + 'image_class_labels.txt',
+ train_test_split_file=data_root + 'train_test_split.txt',
+ data_prefix=data_root + 'images',
+ test_mode=True,
+ pipeline=test_pipeline))
+
+evaluation = dict(
+ interval=1, metric='accuracy',
+ save_best='auto') # save the checkpoint with highest accuracy
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet21k_bs128.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet21k_bs128.py
new file mode 100644
index 00000000..b81a7466
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet21k_bs128.py
@@ -0,0 +1,43 @@
+# dataset settings
+dataset_type = 'ImageNet21k'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='RandomResizedCrop', size=224),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
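+    # size=(256, -1): resize the short edge to 256 and keep the aspect ratio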
+ dict(type='Resize', size=(256, -1)),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=128,
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet21k/train',
+ pipeline=train_pipeline,
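+        # collect images from nested class sub-folders as well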
+ recursion_subdir=True),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet21k/val',
+ ann_file='data/imagenet21k/meta/val.txt',
+ pipeline=test_pipeline,
+ recursion_subdir=True),
+ test=dict(
+ # replace `data/val` with `data/test` for standard test
+ type=dataset_type,
+ data_prefix='data/imagenet21k/val',
+ ann_file='data/imagenet21k/meta/val.txt',
+ pipeline=test_pipeline,
+ recursion_subdir=True))
+evaluation = dict(interval=1, metric='accuracy')
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs128_poolformer_medium_224.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs128_poolformer_medium_224.py
new file mode 100644
index 00000000..667e58a1
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs128_poolformer_medium_224.py
@@ -0,0 +1,71 @@
+_base_ = ['./pipelines/rand_aug.py']
+
+# dataset settings
+dataset_type = 'ImageNet'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandomResizedCrop',
+ size=224,
+ backend='pillow',
+ interpolation='bicubic'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
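+    # {{_base_.rand_increasing_policies}} is substituted by mmcv with the
+    # variable of that name from the _base_ file imported at the top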
+ dict(
+ type='RandAugment',
+ policies={{_base_.rand_increasing_policies}},
+ num_policies=2,
+ total_level=10,
+ magnitude_level=9,
+ magnitude_std=0.5,
+ hparams=dict(
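+            # mean is given in RGB order; [::-1] converts it to the BGR
+            # layout the pipeline uses before Normalize(to_rgb=True)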
+ pad_val=[round(x) for x in img_norm_cfg['mean'][::-1]],
+ interpolation='bicubic')),
+ dict(
+ type='RandomErasing',
+ erase_prob=0.25,
+ mode='rand',
+ min_area_ratio=0.02,
+ max_area_ratio=1 / 3,
+ fill_color=img_norm_cfg['mean'][::-1],
+ fill_std=img_norm_cfg['std'][::-1]),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='Resize',
+ size=(236, -1),
+ backend='pillow',
+ interpolation='bicubic'),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=128,
+ workers_per_gpu=8,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/train',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ # replace `data/val` with `data/test` for standard test
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline))
+
+evaluation = dict(interval=10, metric='accuracy')
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs128_poolformer_small_224.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs128_poolformer_small_224.py
new file mode 100644
index 00000000..76aee7e1
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs128_poolformer_small_224.py
@@ -0,0 +1,71 @@
+_base_ = ['./pipelines/rand_aug.py']
+
+# dataset settings
+dataset_type = 'ImageNet'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandomResizedCrop',
+ size=224,
+ backend='pillow',
+ interpolation='bicubic'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(
+ type='RandAugment',
+ policies={{_base_.rand_increasing_policies}},
+ num_policies=2,
+ total_level=10,
+ magnitude_level=9,
+ magnitude_std=0.5,
+ hparams=dict(
+ pad_val=[round(x) for x in img_norm_cfg['mean'][::-1]],
+ interpolation='bicubic')),
+ dict(
+ type='RandomErasing',
+ erase_prob=0.25,
+ mode='rand',
+ min_area_ratio=0.02,
+ max_area_ratio=1 / 3,
+ fill_color=img_norm_cfg['mean'][::-1],
+ fill_std=img_norm_cfg['std'][::-1]),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='Resize',
+ size=(248, -1),
+ backend='pillow',
+ interpolation='bicubic'),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=128,
+ workers_per_gpu=8,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/train',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ # replace `data/val` with `data/test` for standard test
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline))
+
+evaluation = dict(interval=10, metric='accuracy')
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs256_rsb_a12.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs256_rsb_a12.py
new file mode 100644
index 00000000..75968556
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs256_rsb_a12.py
@@ -0,0 +1,53 @@
+_base_ = ['./pipelines/rand_aug.py']
+
+# dataset settings
+dataset_type = 'ImageNet'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='RandomResizedCrop', size=224),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(
+ type='RandAugment',
+ policies={{_base_.rand_increasing_policies}},
+ num_policies=2,
+ total_level=10,
+ magnitude_level=7,
+ magnitude_std=0.5,
+ hparams=dict(
+ pad_val=[round(x) for x in img_norm_cfg['mean'][::-1]],
+ interpolation='bicubic')),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='Resize', size=(236, -1)),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=256,
+ workers_per_gpu=4,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/train',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ # replace `data/val` with `data/test` for standard test
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline))
+
+evaluation = dict(interval=1, metric='accuracy')
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs256_rsb_a3.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs256_rsb_a3.py
new file mode 100644
index 00000000..aee640d7
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs256_rsb_a3.py
@@ -0,0 +1,53 @@
+_base_ = ['./pipelines/rand_aug.py']
+
+# dataset settings
+dataset_type = 'ImageNet'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='RandomResizedCrop', size=160),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(
+ type='RandAugment',
+ policies={{_base_.rand_increasing_policies}},
+ num_policies=2,
+ total_level=10,
+ magnitude_level=6,
+ magnitude_std=0.5,
+ hparams=dict(
+ pad_val=[round(x) for x in img_norm_cfg['mean'][::-1]],
+ interpolation='bicubic')),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='Resize', size=(236, -1)),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=256,
+ workers_per_gpu=4,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/train',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ # replace `data/val` with `data/test` for standard test
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline))
+
+evaluation = dict(interval=1, metric='accuracy')
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs32.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs32.py
new file mode 100644
index 00000000..aca48d76
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs32.py
@@ -0,0 +1,40 @@
+# dataset settings
+dataset_type = 'ImageNet'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='RandomResizedCrop', size=224),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='Resize', size=(256, -1)),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+    samples_per_gpu=128,  # raised from the upstream default of 32
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type,
+ data_prefix='/public/DL_DATA/ImageNet-pytorch/train',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='/work/home/ac60ssbz5p/openmmlab_test_new/data/',
+ ann_file='/work/home/ac60ssbz5p/openmmlab_test_new/data/val_list.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ # replace `data/val` with `data/test` for standard test
+ type=dataset_type,
+ data_prefix='/work/home/ac60ssbz5p/openmmlab_test_new/data/',
+ ann_file='/work/home/ac60ssbz5p/openmmlab_test_new/data/val_list.txt',
+ pipeline=test_pipeline))
+evaluation = dict(interval=1, metric='accuracy')
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs32_pil_bicubic.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs32_pil_bicubic.py
new file mode 100644
index 00000000..d66c1bd9
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs32_pil_bicubic.py
@@ -0,0 +1,48 @@
+# dataset settings
+dataset_type = 'ImageNet'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandomResizedCrop',
+ size=224,
+ backend='pillow',
+ interpolation='bicubic'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='Resize',
+ size=(256, -1),
+ backend='pillow',
+ interpolation='bicubic'),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=32,
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/train',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ # replace `data/val` with `data/test` for standard test
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline))
+evaluation = dict(interval=1, metric='accuracy')
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/datasets/imagenet_bs32_pil_resize.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs32_pil_resize.py
similarity index 91%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/datasets/imagenet_bs32_pil_resize.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs32_pil_resize.py
index 22b74f76..ee452e23 100644
--- a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/datasets/imagenet_bs32_pil_resize.py
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs32_pil_resize.py
@@ -20,7 +20,7 @@ test_pipeline = [
dict(type='Collect', keys=['img'])
]
data = dict(
- samples_per_gpu=32,
+ samples_per_gpu=128,
workers_per_gpu=2,
train=dict(
type=dataset_type,
@@ -34,7 +34,7 @@ data = dict(
test=dict(
# replace `data/val` with `data/test` for standard test
type=dataset_type,
- data_prefix='data/imagenet/val',
- ann_file='data/imagenet/meta/val.txt',
+ data_prefix='/public/DL_DATA/ImageNet-pytorch/val/',
+    # ann_file='data/val_list.txt',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='accuracy')
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/datasets/imagenet_bs64.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs64.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/datasets/imagenet_bs64.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs64.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs64_autoaug.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs64_autoaug.py
new file mode 100644
index 00000000..a1092a31
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs64_autoaug.py
@@ -0,0 +1,43 @@
+_base_ = ['./pipelines/auto_aug.py']
+
+# dataset settings
+dataset_type = 'ImageNet'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='RandomResizedCrop', size=224),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='AutoAugment', policies={{_base_.auto_increasing_policies}}),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='Resize', size=(256, -1)),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=64,
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/train',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ # replace `data/val` with `data/test` for standard test
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline))
+evaluation = dict(interval=1, metric='accuracy')
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs64_convmixer_224.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs64_convmixer_224.py
new file mode 100644
index 00000000..afd71136
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs64_convmixer_224.py
@@ -0,0 +1,71 @@
+_base_ = ['./pipelines/rand_aug.py']
+
+# dataset settings
+dataset_type = 'ImageNet'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandomResizedCrop',
+ size=224,
+ backend='pillow',
+ interpolation='bicubic'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(
+ type='RandAugment',
+ policies={{_base_.rand_increasing_policies}},
+ num_policies=2,
+ total_level=10,
+ magnitude_level=9,
+ magnitude_std=0.5,
+ hparams=dict(
+ pad_val=[round(x) for x in img_norm_cfg['mean'][::-1]],
+ interpolation='bicubic')),
+ dict(
+ type='RandomErasing',
+ erase_prob=0.25,
+ mode='rand',
+ min_area_ratio=0.02,
+ max_area_ratio=1 / 3,
+ fill_color=img_norm_cfg['mean'][::-1],
+ fill_std=img_norm_cfg['std'][::-1]),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='Resize',
+ size=(233, -1),
+ backend='pillow',
+ interpolation='bicubic'),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=64,
+ workers_per_gpu=8,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/train',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ # replace `data/val` with `data/test` for standard test
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline))
+
+evaluation = dict(interval=10, metric='accuracy')
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs64_mixer_224.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs64_mixer_224.py
new file mode 100644
index 00000000..a005436d
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs64_mixer_224.py
@@ -0,0 +1,48 @@
+# dataset settings
+dataset_type = 'ImageNet'
+
+# normalization values follow
+# https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/mlp_mixer.py
+img_norm_cfg = dict(
+ mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True)
+
+# training is not supported yet
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='RandomResizedCrop', size=224, backend='cv2'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='Resize', size=(256, -1), backend='cv2', interpolation='bicubic'),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=64,
+ workers_per_gpu=8,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/train',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ # replace `data/val` with `data/test` for standard test
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline))
+
+evaluation = dict(interval=10, metric='accuracy')
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/datasets/imagenet_bs64_pil_resize.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs64_pil_resize.py
similarity index 92%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/datasets/imagenet_bs64_pil_resize.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs64_pil_resize.py
index 95d0e1f2..61a25646 100644
--- a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/datasets/imagenet_bs64_pil_resize.py
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs64_pil_resize.py
@@ -34,7 +34,7 @@ data = dict(
test=dict(
# replace `data/val` with `data/test` for standard test
type=dataset_type,
- data_prefix='data/imagenet/val',
- ann_file='data/imagenet/meta/val.txt',
+ data_prefix='/public/DL_DATA/ImageNet-pytorch/val',
+    # ann_file='data/imagenet/meta/val.txt',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='accuracy')
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs64_pil_resize_autoaug.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs64_pil_resize_autoaug.py
new file mode 100644
index 00000000..2a9a4de8
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs64_pil_resize_autoaug.py
@@ -0,0 +1,53 @@
+_base_ = [
+ 'pipelines/auto_aug.py',
+]
+
+# dataset settings
+dataset_type = 'ImageNet'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandomResizedCrop',
+ size=224,
+ backend='pillow',
+ interpolation='bicubic'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='AutoAugment', policies={{_base_.policy_imagenet}}),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='Resize',
+ size=(256, -1),
+ backend='pillow',
+ interpolation='bicubic'),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=64,
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/train',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ # replace `data/val` with `data/test` for standard test
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline))
+evaluation = dict(interval=1, metric='accuracy')
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs64_swin_224.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs64_swin_224.py
new file mode 100644
index 00000000..4a059a33
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs64_swin_224.py
@@ -0,0 +1,71 @@
+_base_ = ['./pipelines/rand_aug.py']
+
+# dataset settings
+dataset_type = 'ImageNet'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandomResizedCrop',
+ size=224,
+ backend='pillow',
+ interpolation='bicubic'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(
+ type='RandAugment',
+ policies={{_base_.rand_increasing_policies}},
+ num_policies=2,
+ total_level=10,
+ magnitude_level=9,
+ magnitude_std=0.5,
+ hparams=dict(
+ pad_val=[round(x) for x in img_norm_cfg['mean'][::-1]],
+ interpolation='bicubic')),
+ dict(
+ type='RandomErasing',
+ erase_prob=0.25,
+ mode='rand',
+ min_area_ratio=0.02,
+ max_area_ratio=1 / 3,
+ fill_color=img_norm_cfg['mean'][::-1],
+ fill_std=img_norm_cfg['std'][::-1]),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='Resize',
+ size=(256, -1),
+ backend='pillow',
+ interpolation='bicubic'),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=64,
+ workers_per_gpu=8,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/train',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ # replace `data/val` with `data/test` for standard test
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline))
+
+evaluation = dict(interval=10, metric='accuracy')
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs64_swin_256.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs64_swin_256.py
new file mode 100644
index 00000000..1f73683a
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs64_swin_256.py
@@ -0,0 +1,71 @@
+_base_ = ['./pipelines/rand_aug.py']
+
+# dataset settings
+dataset_type = 'ImageNet'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandomResizedCrop',
+ size=256,
+ backend='pillow',
+ interpolation='bicubic'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(
+ type='RandAugment',
+ policies={{_base_.rand_increasing_policies}},
+ num_policies=2,
+ total_level=10,
+ magnitude_level=9,
+ magnitude_std=0.5,
+ hparams=dict(
+ pad_val=[round(x) for x in img_norm_cfg['mean'][::-1]],
+ interpolation='bicubic')),
+ dict(
+ type='RandomErasing',
+ erase_prob=0.25,
+ mode='rand',
+ min_area_ratio=0.02,
+ max_area_ratio=1 / 3,
+ fill_color=img_norm_cfg['mean'][::-1],
+ fill_std=img_norm_cfg['std'][::-1]),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='Resize',
+        size=(292, -1),  # 256 / 224 * 256 ≈ 292
+ backend='pillow',
+ interpolation='bicubic'),
+ dict(type='CenterCrop', crop_size=256),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=64,
+ workers_per_gpu=8,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/train',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ # replace `data/val` with `data/test` for standard test
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline))
+
+evaluation = dict(interval=10, metric='accuracy')
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs64_swin_384.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs64_swin_384.py
new file mode 100644
index 00000000..d2639399
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs64_swin_384.py
@@ -0,0 +1,43 @@
+# dataset settings
+dataset_type = 'ImageNet'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandomResizedCrop',
+ size=384,
+ backend='pillow',
+ interpolation='bicubic'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='Resize', size=384, backend='pillow', interpolation='bicubic'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=64,
+ workers_per_gpu=8,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/train',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ # replace `data/val` with `data/test` for standard test
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline))
+evaluation = dict(interval=10, metric='accuracy')
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs64_t2t_224.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs64_t2t_224.py
new file mode 100644
index 00000000..1190d6f9
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/imagenet_bs64_t2t_224.py
@@ -0,0 +1,71 @@
+_base_ = ['./pipelines/rand_aug.py']
+
+# dataset settings
+dataset_type = 'ImageNet'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandomResizedCrop',
+ size=224,
+ backend='pillow',
+ interpolation='bicubic'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(
+ type='RandAugment',
+ policies={{_base_.rand_increasing_policies}},
+ num_policies=2,
+ total_level=10,
+ magnitude_level=9,
+ magnitude_std=0.5,
+ hparams=dict(
+ pad_val=[round(x) for x in img_norm_cfg['mean'][::-1]],
+ interpolation='bicubic')),
+ dict(
+ type='RandomErasing',
+ erase_prob=0.25,
+ mode='rand',
+ min_area_ratio=0.02,
+ max_area_ratio=1 / 3,
+ fill_color=img_norm_cfg['mean'][::-1],
+ fill_std=img_norm_cfg['std'][::-1]),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='Resize',
+ size=(248, -1),
+ backend='pillow',
+ interpolation='bicubic'),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=64,
+ workers_per_gpu=4,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/train',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ # replace `data/val` with `data/test` for standard test
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline))
+
+evaluation = dict(interval=1, metric='accuracy', save_best='auto')
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/pipelines/auto_aug.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/pipelines/auto_aug.py
new file mode 100644
index 00000000..5a10f7ee
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/pipelines/auto_aug.py
@@ -0,0 +1,96 @@
+# AutoAugment policy for ImageNet, adapted from
+# https://github.com/DeepVoltaire/AutoAugment/blame/master/autoaugment.py
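+# Each inner list is one sub-policy of two transforms; AutoAugment samples a
+# sub-policy per image and applies its transforms in order, each with the
+# probability given by `prob`.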
+policy_imagenet = [
+ [
+ dict(type='Posterize', bits=4, prob=0.4),
+ dict(type='Rotate', angle=30., prob=0.6)
+ ],
+ [
+ dict(type='Solarize', thr=256 / 9 * 4, prob=0.6),
+ dict(type='AutoContrast', prob=0.6)
+ ],
+ [dict(type='Equalize', prob=0.8),
+ dict(type='Equalize', prob=0.6)],
+ [
+ dict(type='Posterize', bits=5, prob=0.6),
+ dict(type='Posterize', bits=5, prob=0.6)
+ ],
+ [
+ dict(type='Equalize', prob=0.4),
+ dict(type='Solarize', thr=256 / 9 * 5, prob=0.2)
+ ],
+ [
+ dict(type='Equalize', prob=0.4),
+ dict(type='Rotate', angle=30 / 9 * 8, prob=0.8)
+ ],
+ [
+ dict(type='Solarize', thr=256 / 9 * 6, prob=0.6),
+ dict(type='Equalize', prob=0.6)
+ ],
+ [dict(type='Posterize', bits=6, prob=0.8),
+ dict(type='Equalize', prob=1.)],
+ [
+ dict(type='Rotate', angle=10., prob=0.2),
+ dict(type='Solarize', thr=256 / 9, prob=0.6)
+ ],
+ [
+ dict(type='Equalize', prob=0.6),
+ dict(type='Posterize', bits=5, prob=0.4)
+ ],
+ [
+ dict(type='Rotate', angle=30 / 9 * 8, prob=0.8),
+ dict(type='ColorTransform', magnitude=0., prob=0.4)
+ ],
+ [
+ dict(type='Rotate', angle=30., prob=0.4),
+ dict(type='Equalize', prob=0.6)
+ ],
+ [dict(type='Equalize', prob=0.0),
+ dict(type='Equalize', prob=0.8)],
+ [dict(type='Invert', prob=0.6),
+ dict(type='Equalize', prob=1.)],
+ [
+ dict(type='ColorTransform', magnitude=0.4, prob=0.6),
+ dict(type='Contrast', magnitude=0.8, prob=1.)
+ ],
+ [
+ dict(type='Rotate', angle=30 / 9 * 8, prob=0.8),
+ dict(type='ColorTransform', magnitude=0.2, prob=1.)
+ ],
+ [
+ dict(type='ColorTransform', magnitude=0.8, prob=0.8),
+ dict(type='Solarize', thr=256 / 9 * 2, prob=0.8)
+ ],
+ [
+ dict(type='Sharpness', magnitude=0.7, prob=0.4),
+ dict(type='Invert', prob=0.6)
+ ],
+ [
+ dict(
+ type='Shear',
+ magnitude=0.3 / 9 * 5,
+ prob=0.6,
+ direction='horizontal'),
+ dict(type='Equalize', prob=1.)
+ ],
+ [
+ dict(type='ColorTransform', magnitude=0., prob=0.4),
+ dict(type='Equalize', prob=0.6)
+ ],
+ [
+ dict(type='Equalize', prob=0.4),
+ dict(type='Solarize', thr=256 / 9 * 5, prob=0.2)
+ ],
+ [
+ dict(type='Solarize', thr=256 / 9 * 4, prob=0.6),
+ dict(type='AutoContrast', prob=0.6)
+ ],
+ [dict(type='Invert', prob=0.6),
+ dict(type='Equalize', prob=1.)],
+ [
+ dict(type='ColorTransform', magnitude=0.4, prob=0.6),
+ dict(type='Contrast', magnitude=0.8, prob=1.)
+ ],
+ [dict(type='Equalize', prob=0.8),
+ dict(type='Equalize', prob=0.6)],
+]
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/pipelines/rand_aug.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/pipelines/rand_aug.py
new file mode 100644
index 00000000..f2bab3c3
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/pipelines/rand_aug.py
@@ -0,0 +1,43 @@
+# Based on `_RAND_INCREASING_TRANSFORMS` in pytorch-image-models
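+# Each transform maps the shared magnitude level onto its own magnitude_range;
+# ranges written high-to-low (e.g. Posterize (4, 0)) get stronger as the
+# level rises, hence "increasing".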
+rand_increasing_policies = [
+ dict(type='AutoContrast'),
+ dict(type='Equalize'),
+ dict(type='Invert'),
+ dict(type='Rotate', magnitude_key='angle', magnitude_range=(0, 30)),
+ dict(type='Posterize', magnitude_key='bits', magnitude_range=(4, 0)),
+ dict(type='Solarize', magnitude_key='thr', magnitude_range=(256, 0)),
+ dict(
+ type='SolarizeAdd',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 110)),
+ dict(
+ type='ColorTransform',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 0.9)),
+ dict(type='Contrast', magnitude_key='magnitude', magnitude_range=(0, 0.9)),
+ dict(
+ type='Brightness', magnitude_key='magnitude',
+ magnitude_range=(0, 0.9)),
+ dict(
+ type='Sharpness', magnitude_key='magnitude', magnitude_range=(0, 0.9)),
+ dict(
+ type='Shear',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 0.3),
+ direction='horizontal'),
+ dict(
+ type='Shear',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 0.3),
+ direction='vertical'),
+ dict(
+ type='Translate',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 0.45),
+ direction='horizontal'),
+ dict(
+ type='Translate',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 0.45),
+ direction='vertical')
+]
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/stanford_cars_bs8_448.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/stanford_cars_bs8_448.py
new file mode 100644
index 00000000..636b2e14
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/stanford_cars_bs8_448.py
@@ -0,0 +1,46 @@
+# dataset settings
+dataset_type = 'StanfordCars'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='Resize', size=512),
+ dict(type='RandomCrop', size=448),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='Resize', size=512),
+ dict(type='CenterCrop', crop_size=448),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+
+data_root = 'data/stanfordcars'
+data = dict(
+ samples_per_gpu=8,
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type,
+ data_prefix=data_root,
+ test_mode=False,
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix=data_root,
+ test_mode=True,
+ pipeline=test_pipeline),
+ test=dict(
+ type=dataset_type,
+ data_prefix=data_root,
+ test_mode=True,
+ pipeline=test_pipeline))
+
+evaluation = dict(
+ interval=1, metric='accuracy',
+ save_best='auto') # save the checkpoint with highest accuracy
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/datasets/voc_bs16.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/voc_bs16.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/datasets/voc_bs16.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/datasets/voc_bs16.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/default_runtime.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/default_runtime.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/default_runtime.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/default_runtime.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/conformer/base-p16.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/conformer/base-p16.py
new file mode 100644
index 00000000..157dcc98
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/conformer/base-p16.py
@@ -0,0 +1,22 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='Conformer', arch='base', drop_path_rate=0.1, init_cfg=None),
+ neck=None,
+ head=dict(
+ type='ConformerHead',
+ num_classes=1000,
+ in_channels=[1536, 576],
+ init_cfg=None,
+ loss=dict(
+ type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
+ cal_acc=False),
+ init_cfg=[
+ dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.),
+ dict(type='Constant', layer='LayerNorm', val=1., bias=0.)
+ ],
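+    # one of the two batch augmentations below is sampled per batch
+    # (their probabilities sum to 1)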
+ train_cfg=dict(augments=[
+ dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5),
+ dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
+ ]))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/conformer/small-p16.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/conformer/small-p16.py
new file mode 100644
index 00000000..17298089
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/conformer/small-p16.py
@@ -0,0 +1,22 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='Conformer', arch='small', drop_path_rate=0.1, init_cfg=None),
+ neck=None,
+ head=dict(
+ type='ConformerHead',
+ num_classes=1000,
+ in_channels=[1024, 384],
+ init_cfg=None,
+ loss=dict(
+ type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
+ cal_acc=False),
+ init_cfg=[
+ dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.),
+ dict(type='Constant', layer='LayerNorm', val=1., bias=0.)
+ ],
+ train_cfg=dict(augments=[
+ dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5),
+ dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
+ ]))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/conformer/small-p32.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/conformer/small-p32.py
new file mode 100644
index 00000000..593aba12
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/conformer/small-p32.py
@@ -0,0 +1,26 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='Conformer',
+ arch='small',
+ patch_size=32,
+ drop_path_rate=0.1,
+ init_cfg=None),
+ neck=None,
+ head=dict(
+ type='ConformerHead',
+ num_classes=1000,
+ in_channels=[1024, 384],
+ init_cfg=None,
+ loss=dict(
+ type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
+ cal_acc=False),
+ init_cfg=[
+ dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.),
+ dict(type='Constant', layer='LayerNorm', val=1., bias=0.)
+ ],
+ train_cfg=dict(augments=[
+ dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5),
+ dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
+ ]))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/conformer/tiny-p16.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/conformer/tiny-p16.py
new file mode 100644
index 00000000..dad8ecae
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/conformer/tiny-p16.py
@@ -0,0 +1,22 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='Conformer', arch='tiny', drop_path_rate=0.1, init_cfg=None),
+ neck=None,
+ head=dict(
+ type='ConformerHead',
+ num_classes=1000,
+ in_channels=[256, 384],
+ init_cfg=None,
+ loss=dict(
+ type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
+ cal_acc=False),
+ init_cfg=[
+ dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.),
+ dict(type='Constant', layer='LayerNorm', val=1., bias=0.)
+ ],
+ train_cfg=dict(augments=[
+ dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5),
+ dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
+ ]))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/convmixer/convmixer-1024-20.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/convmixer/convmixer-1024-20.py
new file mode 100644
index 00000000..a8f4d517
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/convmixer/convmixer-1024-20.py
@@ -0,0 +1,11 @@
+# Model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='ConvMixer', arch='1024/20'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=1024,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ ))
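+
+# Usage sketch (illustrative, assuming mmclassification 0.24.x): the model
+# defined above can be instantiated with
+#
+#   from mmcv import Config
+#   from mmcls.models import build_classifier
+#
+#   cfg = Config.fromfile(
+#       'configs/_base_/models/convmixer/convmixer-1024-20.py')
+#   model = build_classifier(cfg.model)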
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/convmixer/convmixer-1536-20.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/convmixer/convmixer-1536-20.py
new file mode 100644
index 00000000..9ad8209b
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/convmixer/convmixer-1536-20.py
@@ -0,0 +1,11 @@
+# Model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='ConvMixer', arch='1536/20'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=1536,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ ))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/convmixer/convmixer-768-32.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/convmixer/convmixer-768-32.py
new file mode 100644
index 00000000..1cba528b
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/convmixer/convmixer-768-32.py
@@ -0,0 +1,11 @@
+# Model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='ConvMixer', arch='768/32', act_cfg=dict(type='ReLU')),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=768,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ ))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/convnext/convnext-base.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/convnext/convnext-base.py
new file mode 100644
index 00000000..7fc5ce71
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/convnext/convnext-base.py
@@ -0,0 +1,23 @@
+# Model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='ConvNeXt',
+ arch='base',
+ out_indices=(3, ),
+ drop_path_rate=0.5,
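+        # global average pooling happens before the final LayerNorm, as in
+        # the official ConvNeXt, so no separate neck is needed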
+ gap_before_final_norm=True,
+ init_cfg=[
+ dict(
+ type='TruncNormal',
+ layer=['Conv2d', 'Linear'],
+ std=.02,
+ bias=0.),
+ dict(type='Constant', layer=['LayerNorm'], val=1., bias=0.),
+ ]),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=1024,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ ))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/convnext/convnext-large.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/convnext/convnext-large.py
new file mode 100644
index 00000000..4d9e37c0
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/convnext/convnext-large.py
@@ -0,0 +1,23 @@
+# Model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='ConvNeXt',
+ arch='large',
+ out_indices=(3, ),
+ drop_path_rate=0.5,
+ gap_before_final_norm=True,
+ init_cfg=[
+ dict(
+ type='TruncNormal',
+ layer=['Conv2d', 'Linear'],
+ std=.02,
+ bias=0.),
+ dict(type='Constant', layer=['LayerNorm'], val=1., bias=0.),
+ ]),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=1536,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ ))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/convnext/convnext-small.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/convnext/convnext-small.py
new file mode 100644
index 00000000..989ad1d4
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/convnext/convnext-small.py
@@ -0,0 +1,23 @@
+# Model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='ConvNeXt',
+ arch='small',
+ out_indices=(3, ),
+ drop_path_rate=0.4,
+ gap_before_final_norm=True,
+ init_cfg=[
+ dict(
+ type='TruncNormal',
+ layer=['Conv2d', 'Linear'],
+ std=.02,
+ bias=0.),
+ dict(type='Constant', layer=['LayerNorm'], val=1., bias=0.),
+ ]),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=768,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ ))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/convnext/convnext-tiny.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/convnext/convnext-tiny.py
new file mode 100644
index 00000000..0b692abb
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/convnext/convnext-tiny.py
@@ -0,0 +1,23 @@
+# Model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='ConvNeXt',
+ arch='tiny',
+ out_indices=(3, ),
+ drop_path_rate=0.1,
+ gap_before_final_norm=True,
+ init_cfg=[
+ dict(
+ type='TruncNormal',
+ layer=['Conv2d', 'Linear'],
+ std=.02,
+ bias=0.),
+ dict(type='Constant', layer=['LayerNorm'], val=1., bias=0.),
+ ]),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=768,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ ))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/convnext/convnext-xlarge.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/convnext/convnext-xlarge.py
new file mode 100644
index 00000000..0c75e325
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/convnext/convnext-xlarge.py
@@ -0,0 +1,23 @@
+# Model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='ConvNeXt',
+ arch='xlarge',
+ out_indices=(3, ),
+ drop_path_rate=0.5,
+ gap_before_final_norm=True,
+ init_cfg=[
+ dict(
+ type='TruncNormal',
+ layer=['Conv2d', 'Linear'],
+ std=.02,
+ bias=0.),
+ dict(type='Constant', layer=['LayerNorm'], val=1., bias=0.),
+ ]),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2048,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ ))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/densenet/densenet121.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/densenet/densenet121.py
new file mode 100644
index 00000000..0a14d302
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/densenet/densenet121.py
@@ -0,0 +1,11 @@
+# Model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='DenseNet', arch='121'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=1024,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ ))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/densenet/densenet161.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/densenet/densenet161.py
new file mode 100644
index 00000000..61a0d838
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/densenet/densenet161.py
@@ -0,0 +1,11 @@
+# Model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='DenseNet', arch='161'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2208,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ ))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/densenet/densenet169.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/densenet/densenet169.py
new file mode 100644
index 00000000..779ea170
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/densenet/densenet169.py
@@ -0,0 +1,11 @@
+# Model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='DenseNet', arch='169'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=1664,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ ))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/densenet/densenet201.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/densenet/densenet201.py
new file mode 100644
index 00000000..2909af0d
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/densenet/densenet201.py
@@ -0,0 +1,11 @@
+# Model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='DenseNet', arch='201'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=1920,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ ))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/efficientnet_b0.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/efficientnet_b0.py
new file mode 100644
index 00000000..d9ba6853
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/efficientnet_b0.py
@@ -0,0 +1,12 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='EfficientNet', arch='b0'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=1280,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
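+        # report both top-1 and top-5 accuracy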
+ topk=(1, 5),
+ ))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/efficientnet_b1.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/efficientnet_b1.py
new file mode 100644
index 00000000..63e15c88
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/efficientnet_b1.py
@@ -0,0 +1,12 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='EfficientNet', arch='b1'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=1280,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/efficientnet_b2.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/efficientnet_b2.py
new file mode 100644
index 00000000..5edcfa5d
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/efficientnet_b2.py
@@ -0,0 +1,12 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='EfficientNet', arch='b2'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=1408,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/efficientnet_b3.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/efficientnet_b3.py
new file mode 100644
index 00000000..c7c6d6d8
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/efficientnet_b3.py
@@ -0,0 +1,12 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='EfficientNet', arch='b3'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=1536,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/efficientnet_b4.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/efficientnet_b4.py
new file mode 100644
index 00000000..06840ed5
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/efficientnet_b4.py
@@ -0,0 +1,12 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='EfficientNet', arch='b4'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=1792,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/efficientnet_b5.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/efficientnet_b5.py
new file mode 100644
index 00000000..a86eebd1
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/efficientnet_b5.py
@@ -0,0 +1,12 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='EfficientNet', arch='b5'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2048,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/efficientnet_b6.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/efficientnet_b6.py
new file mode 100644
index 00000000..4eada1d3
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/efficientnet_b6.py
@@ -0,0 +1,12 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='EfficientNet', arch='b6'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2304,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/efficientnet_b7.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/efficientnet_b7.py
new file mode 100644
index 00000000..1d84ba42
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/efficientnet_b7.py
@@ -0,0 +1,12 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='EfficientNet', arch='b7'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2560,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/efficientnet_b8.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/efficientnet_b8.py
new file mode 100644
index 00000000..c9500644
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/efficientnet_b8.py
@@ -0,0 +1,12 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='EfficientNet', arch='b8'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2816,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/efficientnet_em.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/efficientnet_em.py
new file mode 100644
index 00000000..abecdbee
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/efficientnet_em.py
@@ -0,0 +1,13 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ # `em` denotes the EfficientNet-EdgeTPU-M architecture
+ backbone=dict(type='EfficientNet', arch='em', act_cfg=dict(type='ReLU')),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=1280,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/efficientnet_es.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/efficientnet_es.py
new file mode 100644
index 00000000..911ba4a1
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/efficientnet_es.py
@@ -0,0 +1,13 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ # `es` denotes the EfficientNet-EdgeTPU-S architecture
+ backbone=dict(type='EfficientNet', arch='es', act_cfg=dict(type='ReLU')),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=1280,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
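
The EfficientNet B-series bases above differ only in the backbone `arch` string and the head's `in_channels`; the edge variants `em`/`es` keep the B0 width of 1280 but swap in ReLU activations. The channel counts follow EfficientNet's compound width scaling of the final 1280-channel projection, rounded to a multiple of 8. A reader's sketch of that relationship (an observation about the numbers above, not code from this patch):

# Width coefficients per variant, from the EfficientNet paper.
WIDTH_COEFF = {'b0': 1.0, 'b1': 1.0, 'b2': 1.1, 'b3': 1.2, 'b4': 1.4,
               'b5': 1.6, 'b6': 1.8, 'b7': 2.0, 'b8': 2.2}

def make_divisible(channels: float, divisor: int = 8) -> int:
    """Round to the nearest multiple of `divisor`, never shrinking by >10%."""
    new = max(divisor, int(channels + divisor / 2) // divisor * divisor)
    if new < 0.9 * channels:
        new += divisor
    return new

for arch, w in WIDTH_COEFF.items():
    # Prints 1280, 1280, 1408, 1536, 1792, 2048, 2304, 2560, 2816,
    # matching head.in_channels in the configs above.
    print(arch, make_divisible(1280 * w))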
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hornet/hornet-base-gf.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hornet/hornet-base-gf.py
new file mode 100644
index 00000000..7544970f
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hornet/hornet-base-gf.py
@@ -0,0 +1,21 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='HorNet', arch='base-gf', drop_path_rate=0.5),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=1024,
+ init_cfg=None, # suppress the default init_cfg of LinearClsHead.
+ loss=dict(
+ type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
+ cal_acc=False),
+ init_cfg=[
+ dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.),
+ dict(type='Constant', layer='LayerNorm', val=1., bias=0.),
+ dict(type='Constant', layer=['LayerScale'], val=1e-6)
+ ],
+ train_cfg=dict(augments=[
+ dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5),
+ dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
+ ]))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hornet/hornet-base.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hornet/hornet-base.py
new file mode 100644
index 00000000..82764146
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hornet/hornet-base.py
@@ -0,0 +1,21 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='HorNet', arch='base', drop_path_rate=0.5),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=1024,
+ init_cfg=None, # suppress the default init_cfg of LinearClsHead.
+ loss=dict(
+ type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
+ cal_acc=False),
+ init_cfg=[
+ dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.),
+ dict(type='Constant', layer='LayerNorm', val=1., bias=0.),
+ dict(type='Constant', layer=['LayerScale'], val=1e-6)
+ ],
+ train_cfg=dict(augments=[
+ dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5),
+ dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
+ ]))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hornet/hornet-large-gf.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hornet/hornet-large-gf.py
new file mode 100644
index 00000000..a5b55113
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hornet/hornet-large-gf.py
@@ -0,0 +1,21 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='HorNet', arch='large-gf', drop_path_rate=0.2),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=1536,
+ init_cfg=None, # suppress the default init_cfg of LinearClsHead.
+ loss=dict(
+ type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
+ cal_acc=False),
+ init_cfg=[
+ dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.),
+ dict(type='Constant', layer='LayerNorm', val=1., bias=0.),
+ dict(type='Constant', layer=['LayerScale'], val=1e-6)
+ ],
+ train_cfg=dict(augments=[
+ dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5),
+ dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
+ ]))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hornet/hornet-large-gf384.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hornet/hornet-large-gf384.py
new file mode 100644
index 00000000..fbb54787
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hornet/hornet-large-gf384.py
@@ -0,0 +1,17 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='HorNet', arch='large-gf384', drop_path_rate=0.4),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=1536,
+ init_cfg=None, # suppress the default init_cfg of LinearClsHead.
+ loss=dict(
+ type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
+ cal_acc=False),
+ init_cfg=[
+ dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.),
+ dict(type='Constant', layer='LayerNorm', val=1., bias=0.),
+ dict(type='Constant', layer=['LayerScale'], val=1e-6)
+ ])
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hornet/hornet-large.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hornet/hornet-large.py
new file mode 100644
index 00000000..26d99e1a
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hornet/hornet-large.py
@@ -0,0 +1,21 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='HorNet', arch='large', drop_path_rate=0.2),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=1536,
+ init_cfg=None, # suppress the default init_cfg of LinearClsHead.
+ loss=dict(
+ type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
+ cal_acc=False),
+ init_cfg=[
+ dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.),
+ dict(type='Constant', layer='LayerNorm', val=1., bias=0.),
+ dict(type='Constant', layer=['LayerScale'], val=1e-6)
+ ],
+ train_cfg=dict(augments=[
+ dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5),
+ dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
+ ]))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hornet/hornet-small-gf.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hornet/hornet-small-gf.py
new file mode 100644
index 00000000..42d9d119
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hornet/hornet-small-gf.py
@@ -0,0 +1,21 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='HorNet', arch='small-gf', drop_path_rate=0.4),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=768,
+ init_cfg=None, # suppress the default init_cfg of LinearClsHead.
+ loss=dict(
+ type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
+ cal_acc=False),
+ init_cfg=[
+ dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.),
+ dict(type='Constant', layer='LayerNorm', val=1., bias=0.),
+ dict(type='Constant', layer=['LayerScale'], val=1e-6)
+ ],
+ train_cfg=dict(augments=[
+ dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5),
+ dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
+ ]))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hornet/hornet-small.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hornet/hornet-small.py
new file mode 100644
index 00000000..e8039765
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hornet/hornet-small.py
@@ -0,0 +1,21 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='HorNet', arch='small', drop_path_rate=0.4),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=768,
+ init_cfg=None, # suppress the default init_cfg of LinearClsHead.
+ loss=dict(
+ type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
+ cal_acc=False),
+ init_cfg=[
+ dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.),
+ dict(type='Constant', layer='LayerNorm', val=1., bias=0.),
+ dict(type='Constant', layer=['LayerScale'], val=1e-6)
+ ],
+ train_cfg=dict(augments=[
+ dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5),
+ dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
+ ]))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hornet/hornet-tiny-gf.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hornet/hornet-tiny-gf.py
new file mode 100644
index 00000000..0e417d04
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hornet/hornet-tiny-gf.py
@@ -0,0 +1,21 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='HorNet', arch='tiny-gf', drop_path_rate=0.2),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=512,
+ init_cfg=None, # suppress the default init_cfg of LinearClsHead.
+ loss=dict(
+ type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
+ cal_acc=False),
+ init_cfg=[
+ dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.),
+ dict(type='Constant', layer='LayerNorm', val=1., bias=0.),
+ dict(type='Constant', layer=['LayerScale'], val=1e-6)
+ ],
+ train_cfg=dict(augments=[
+ dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5),
+ dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
+ ]))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hornet/hornet-tiny.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hornet/hornet-tiny.py
new file mode 100644
index 00000000..068d7d6b
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hornet/hornet-tiny.py
@@ -0,0 +1,21 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='HorNet', arch='tiny', drop_path_rate=0.2),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=512,
+ init_cfg=None, # suppress the default init_cfg of LinearClsHead.
+ loss=dict(
+ type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
+ cal_acc=False),
+ init_cfg=[
+ dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.),
+ dict(type='Constant', layer='LayerNorm', val=1., bias=0.),
+ dict(type='Constant', layer=['LayerScale'], val=1e-6)
+ ],
+ train_cfg=dict(augments=[
+ dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5),
+ dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
+ ]))
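
All eight HorNet bases share one skeleton: the head sets `init_cfg=None` so the classifier-level `init_cfg` list (TruncNormal for Linear, Constant for LayerNorm and LayerScale) governs initialization, and all but the evaluation-oriented `hornet-large-gf384` attach Mixup/CutMix batch augments via `train_cfg`. A minimal smoke test for one of these fragments, assuming an mmcls 0.24.x / mmcv 1.x environment run from the repository root:

from mmcv import Config
from mmcls.models import build_classifier

cfg = Config.fromfile(
    'openmmlab_test/mmclassification-0.24.1/'
    'configs/_base_/models/hornet/hornet-tiny.py')
model = build_classifier(cfg.model)
model.init_weights()  # applies the TruncNormal/Constant init_cfg list
print(sum(p.numel() for p in model.parameters()) / 1e6, 'M params')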
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hrnet/hrnet-w18.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hrnet/hrnet-w18.py
new file mode 100644
index 00000000..f7fbf298
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hrnet/hrnet-w18.py
@@ -0,0 +1,15 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='HRNet', arch='w18'),
+ neck=[
+ dict(type='HRFuseScales', in_channels=(18, 36, 72, 144)),
+ dict(type='GlobalAveragePooling'),
+ ],
+ head=dict(
+ type='LinearClsHead',
+ in_channels=2048,
+ num_classes=1000,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hrnet/hrnet-w30.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hrnet/hrnet-w30.py
new file mode 100644
index 00000000..babcacac
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hrnet/hrnet-w30.py
@@ -0,0 +1,15 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='HRNet', arch='w30'),
+ neck=[
+ dict(type='HRFuseScales', in_channels=(30, 60, 120, 240)),
+ dict(type='GlobalAveragePooling'),
+ ],
+ head=dict(
+ type='LinearClsHead',
+ in_channels=2048,
+ num_classes=1000,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hrnet/hrnet-w32.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hrnet/hrnet-w32.py
new file mode 100644
index 00000000..2c1e9800
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hrnet/hrnet-w32.py
@@ -0,0 +1,15 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='HRNet', arch='w32'),
+ neck=[
+ dict(type='HRFuseScales', in_channels=(32, 64, 128, 256)),
+ dict(type='GlobalAveragePooling'),
+ ],
+ head=dict(
+ type='LinearClsHead',
+ in_channels=2048,
+ num_classes=1000,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hrnet/hrnet-w40.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hrnet/hrnet-w40.py
new file mode 100644
index 00000000..83f65d86
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hrnet/hrnet-w40.py
@@ -0,0 +1,15 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='HRNet', arch='w40'),
+ neck=[
+ dict(type='HRFuseScales', in_channels=(40, 80, 160, 320)),
+ dict(type='GlobalAveragePooling'),
+ ],
+ head=dict(
+ type='LinearClsHead',
+ in_channels=2048,
+ num_classes=1000,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hrnet/hrnet-w44.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hrnet/hrnet-w44.py
new file mode 100644
index 00000000..e75dc0f8
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hrnet/hrnet-w44.py
@@ -0,0 +1,15 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='HRNet', arch='w44'),
+ neck=[
+ dict(type='HRFuseScales', in_channels=(44, 88, 176, 352)),
+ dict(type='GlobalAveragePooling'),
+ ],
+ head=dict(
+ type='LinearClsHead',
+ in_channels=2048,
+ num_classes=1000,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hrnet/hrnet-w48.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hrnet/hrnet-w48.py
new file mode 100644
index 00000000..f0604958
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hrnet/hrnet-w48.py
@@ -0,0 +1,15 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='HRNet', arch='w48'),
+ neck=[
+ dict(type='HRFuseScales', in_channels=(48, 96, 192, 384)),
+ dict(type='GlobalAveragePooling'),
+ ],
+ head=dict(
+ type='LinearClsHead',
+ in_channels=2048,
+ num_classes=1000,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hrnet/hrnet-w64.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hrnet/hrnet-w64.py
new file mode 100644
index 00000000..844c3fe9
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/hrnet/hrnet-w64.py
@@ -0,0 +1,15 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='HRNet', arch='w64'),
+ neck=[
+ dict(type='HRFuseScales', in_channels=(64, 128, 256, 512)),
+ dict(type='GlobalAveragePooling'),
+ ],
+ head=dict(
+ type='LinearClsHead',
+ in_channels=2048,
+ num_classes=1000,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
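
The seven HRNet bases are instances of a single rule: the four branch widths handed to HRFuseScales are the base width doubled per resolution branch, and the fused output is always projected to 2048 channels before the linear head. A two-line check of every tuple above:

# Reproduces the HRFuseScales in_channels tuple of each hrnet-w* file.
for w in (18, 30, 32, 40, 44, 48, 64):
    print(f'w{w}:', tuple(w * 2 ** i for i in range(4)))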
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/mlp_mixer_base_patch16.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/mlp_mixer_base_patch16.py
new file mode 100644
index 00000000..5ebd17f3
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/mlp_mixer_base_patch16.py
@@ -0,0 +1,25 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='MlpMixer',
+ arch='b',
+ img_size=224,
+ patch_size=16,
+ drop_rate=0.1,
+ init_cfg=[
+ dict(
+ type='Kaiming',
+ layer='Conv2d',
+ mode='fan_in',
+ nonlinearity='linear')
+ ]),
+ neck=dict(type='GlobalAveragePooling', dim=1),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=768,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ),
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/mlp_mixer_large_patch16.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/mlp_mixer_large_patch16.py
new file mode 100644
index 00000000..ff107139
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/mlp_mixer_large_patch16.py
@@ -0,0 +1,25 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='MlpMixer',
+ arch='l',
+ img_size=224,
+ patch_size=16,
+ drop_rate=0.1,
+ init_cfg=[
+ dict(
+ type='Kaiming',
+ layer='Conv2d',
+ mode='fan_in',
+ nonlinearity='linear')
+ ]),
+ neck=dict(type='GlobalAveragePooling', dim=1),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=1024,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ),
+)
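
Both Mixer bases select `GlobalAveragePooling` with `dim=1`, the 1-D form of the neck, since the backbone emits a token sequence rather than a 2-D feature map. These files are `_base_` fragments rather than trainable configs; a leaf config in this tree would consume one roughly as follows (the dataset and runtime bases named here ship under configs/_base_ in this same patch; a schedule base would normally be listed too):

_base_ = [
    '../_base_/models/mlp_mixer_base_patch16.py',
    '../_base_/datasets/imagenet_bs64_mixer_224.py',
    '../_base_/default_runtime.py',
]
# A leaf config overrides only what differs from its bases, e.g.:
model = dict(head=dict(num_classes=100))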
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/mobilenet_v2_1x.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/mobilenet_v2_1x.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/mobilenet_v2_1x.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/mobilenet_v2_1x.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/mobilenet_v3_large_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/mobilenet_v3_large_imagenet.py
new file mode 100644
index 00000000..5318f50f
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/mobilenet_v3_large_imagenet.py
@@ -0,0 +1,16 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='MobileNetV3', arch='large'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='StackedLinearClsHead',
+ num_classes=1000,
+ in_channels=960,
+ mid_channels=[1280],
+ dropout_rate=0.2,
+ act_cfg=dict(type='HSwish'),
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ init_cfg=dict(
+ type='Normal', layer='Linear', mean=0., std=0.01, bias=0.),
+ topk=(1, 5)))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/mobilenet_v3_small_cifar.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/mobilenet_v3_small_cifar.py
new file mode 100644
index 00000000..5dbe980c
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/mobilenet_v3_small_cifar.py
@@ -0,0 +1,13 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='MobileNetV3', arch='small'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='StackedLinearClsHead',
+ num_classes=10,
+ in_channels=576,
+ mid_channels=[1280],
+ act_cfg=dict(type='HSwish'),
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5)))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/mobilenet_v3_small_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/mobilenet_v3_small_imagenet.py
new file mode 100644
index 00000000..af6cc1b8
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/mobilenet_v3_small_imagenet.py
@@ -0,0 +1,16 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='MobileNetV3', arch='small'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='StackedLinearClsHead',
+ num_classes=1000,
+ in_channels=576,
+ mid_channels=[1024],
+ dropout_rate=0.2,
+ act_cfg=dict(type='HSwish'),
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ init_cfg=dict(
+ type='Normal', layer='Linear', mean=0., std=0.01, bias=0.),
+ topk=(1, 5)))
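
The MobileNetV3 bases are the only ones in this patch using `StackedLinearClsHead`: after the GlobalAveragePooling neck, features pass through a hidden projection with HSwish and dropout before the final classifier. As a reading aid, the ImageNet small-variant head dict expands to roughly this plain PyTorch (layer ordering recalled from mmcls 0.x; treat details such as dropout placement as an assumption):

import torch.nn as nn

head = nn.Sequential(
    nn.Linear(576, 1024),   # in_channels -> mid_channels[0]
    nn.Hardswish(),         # act_cfg=dict(type='HSwish')
    nn.Dropout(p=0.2),      # dropout_rate=0.2
    nn.Linear(1024, 1000),  # mid_channels[-1] -> num_classes
)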
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/mvit/mvitv2-base.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/mvit/mvitv2-base.py
new file mode 100644
index 00000000..c75e78ef
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/mvit/mvitv2-base.py
@@ -0,0 +1,19 @@
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='MViT', arch='base', drop_path_rate=0.3),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ in_channels=768,
+ num_classes=1000,
+ loss=dict(
+ type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
+ ),
+ init_cfg=[
+ dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.),
+ dict(type='Constant', layer='LayerNorm', val=1., bias=0.)
+ ],
+ train_cfg=dict(augments=[
+ dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5),
+ dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
+ ]))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/mvit/mvitv2-large.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/mvit/mvitv2-large.py
new file mode 100644
index 00000000..aa4a3250
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/mvit/mvitv2-large.py
@@ -0,0 +1,23 @@
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='MViT',
+ arch='large',
+ drop_path_rate=0.5,
+ dim_mul_in_attention=False),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ in_channels=1152,
+ num_classes=1000,
+ loss=dict(
+ type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
+ ),
+ init_cfg=[
+ dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.),
+ dict(type='Constant', layer='LayerNorm', val=1., bias=0.)
+ ],
+ train_cfg=dict(augments=[
+ dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5),
+ dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
+ ]))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/mvit/mvitv2-small.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/mvit/mvitv2-small.py
new file mode 100644
index 00000000..bb9329df
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/mvit/mvitv2-small.py
@@ -0,0 +1,19 @@
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='MViT', arch='small', drop_path_rate=0.1),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ in_channels=768,
+ num_classes=1000,
+ loss=dict(
+ type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
+ ),
+ init_cfg=[
+ dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.),
+ dict(type='Constant', layer='LayerNorm', val=1., bias=0.)
+ ],
+ train_cfg=dict(augments=[
+ dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5),
+ dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
+ ]))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/mvit/mvitv2-tiny.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/mvit/mvitv2-tiny.py
new file mode 100644
index 00000000..7ca85dc3
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/mvit/mvitv2-tiny.py
@@ -0,0 +1,19 @@
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='MViT', arch='tiny', drop_path_rate=0.1),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ in_channels=768,
+ num_classes=1000,
+ loss=dict(
+ type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
+ ),
+ init_cfg=[
+ dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.),
+ dict(type='Constant', layer='LayerNorm', val=1., bias=0.)
+ ],
+ train_cfg=dict(augments=[
+ dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5),
+ dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
+ ]))
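
Across the MViT v2 family, tiny/small/base all end at 768 channels while large widens to 1152 and additionally sets `dim_mul_in_attention=False`; otherwise only `drop_path_rate` grows with depth. To adjust one of these bases without editing the file, the dotted-key merge that mmcls tools expose as `--cfg-options` is available directly on mmcv 1.x Config objects:

from mmcv import Config

cfg = Config.fromfile(
    'openmmlab_test/mmclassification-0.24.1/'
    'configs/_base_/models/mvit/mvitv2-tiny.py')
cfg.merge_from_dict({'model.backbone.drop_path_rate': 0.2,
                     'model.head.num_classes': 10})
print(cfg.model.backbone.drop_path_rate, cfg.model.head.num_classes)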
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/poolformer/poolformer_m36.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/poolformer/poolformer_m36.py
new file mode 100644
index 00000000..276a7212
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/poolformer/poolformer_m36.py
@@ -0,0 +1,22 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='PoolFormer',
+ arch='m36',
+ drop_path_rate=0.1,
+ init_cfg=[
+ dict(
+ type='TruncNormal',
+ layer=['Conv2d', 'Linear'],
+ std=.02,
+ bias=0.),
+ dict(type='Constant', layer=['GroupNorm'], val=1., bias=0.),
+ ]),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=768,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ ))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/poolformer/poolformer_m48.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/poolformer/poolformer_m48.py
new file mode 100644
index 00000000..8c006acb
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/poolformer/poolformer_m48.py
@@ -0,0 +1,22 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='PoolFormer',
+ arch='m48',
+ drop_path_rate=0.1,
+ init_cfg=[
+ dict(
+ type='TruncNormal',
+ layer=['Conv2d', 'Linear'],
+ std=.02,
+ bias=0.),
+ dict(type='Constant', layer=['GroupNorm'], val=1., bias=0.),
+ ]),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=768,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ ))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/poolformer/poolformer_s12.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/poolformer/poolformer_s12.py
new file mode 100644
index 00000000..b7b3600f
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/poolformer/poolformer_s12.py
@@ -0,0 +1,22 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='PoolFormer',
+ arch='s12',
+ drop_path_rate=0.1,
+ init_cfg=[
+ dict(
+ type='TruncNormal',
+ layer=['Conv2d', 'Linear'],
+ std=.02,
+ bias=0.),
+ dict(type='Constant', layer=['GroupNorm'], val=1., bias=0.),
+ ]),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=512,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ ))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/poolformer/poolformer_s24.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/poolformer/poolformer_s24.py
new file mode 100644
index 00000000..822ab5b3
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/poolformer/poolformer_s24.py
@@ -0,0 +1,22 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='PoolFormer',
+ arch='s24',
+ drop_path_rate=0.1,
+ init_cfg=[
+ dict(
+ type='TruncNormal',
+ layer=['Conv2d', 'Linear'],
+ std=.02,
+ bias=0.),
+ dict(type='Constant', layer=['GroupNorm'], val=1., bias=0.),
+ ]),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=512,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ ))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/poolformer/poolformer_s36.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/poolformer/poolformer_s36.py
new file mode 100644
index 00000000..489f2223
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/poolformer/poolformer_s36.py
@@ -0,0 +1,22 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='PoolFormer',
+ arch='s36',
+ drop_path_rate=0.1,
+ init_cfg=[
+ dict(
+ type='TruncNormal',
+ layer=['Conv2d', 'Linear'],
+ std=.02,
+ bias=0.),
+ dict(type='Constant', layer=['GroupNorm'], val=1., bias=0.),
+ ]),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=512,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ ))
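
Unlike the HorNet and Swin bases, the five PoolFormer files nest `init_cfg` inside the backbone dict rather than at the classifier level, and their heads omit `topk`. The only real variation is the stage recipe implied by `arch` and the final width. A compact restatement of the shared skeleton (backbone init_cfg elided for brevity):

# Final feature width per PoolFormer arch, matching head.in_channels above.
FINAL_WIDTH = {'s12': 512, 's24': 512, 's36': 512, 'm36': 768, 'm48': 768}

def poolformer_model(arch: str) -> dict:
    return dict(
        type='ImageClassifier',
        backbone=dict(type='PoolFormer', arch=arch, drop_path_rate=0.1),
        neck=dict(type='GlobalAveragePooling'),
        head=dict(type='LinearClsHead', num_classes=1000,
                  in_channels=FINAL_WIDTH[arch],
                  loss=dict(type='CrossEntropyLoss', loss_weight=1.0)))

print(poolformer_model('m48')['head']['in_channels'])  # 768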
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/regnet/regnetx_1.6gf.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/regnet/regnetx_1.6gf.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/regnet/regnetx_1.6gf.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/regnet/regnetx_1.6gf.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/regnet/regnetx_12gf.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/regnet/regnetx_12gf.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/regnet/regnetx_12gf.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/regnet/regnetx_12gf.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/regnet/regnetx_3.2gf.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/regnet/regnetx_3.2gf.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/regnet/regnetx_3.2gf.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/regnet/regnetx_3.2gf.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/regnet/regnetx_4.0gf.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/regnet/regnetx_4.0gf.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/regnet/regnetx_4.0gf.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/regnet/regnetx_4.0gf.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/regnet/regnetx_400mf.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/regnet/regnetx_400mf.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/regnet/regnetx_400mf.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/regnet/regnetx_400mf.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/regnet/regnetx_6.4gf.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/regnet/regnetx_6.4gf.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/regnet/regnetx_6.4gf.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/regnet/regnetx_6.4gf.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/regnet/regnetx_8.0gf.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/regnet/regnetx_8.0gf.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/regnet/regnetx_8.0gf.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/regnet/regnetx_8.0gf.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/regnet/regnetx_800mf.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/regnet/regnetx_800mf.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/regnet/regnetx_800mf.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/regnet/regnetx_800mf.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/repmlp-base_224.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/repmlp-base_224.py
new file mode 100644
index 00000000..7db00778
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/repmlp-base_224.py
@@ -0,0 +1,18 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='RepMLPNet',
+ arch='B',
+ img_size=224,
+ out_indices=(3, ),
+ reparam_conv_kernels=(1, 3),
+ deploy=False),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=768,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/repvgg-A0_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/repvgg-A0_in1k.py
new file mode 100644
index 00000000..093ffb7e
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/repvgg-A0_in1k.py
@@ -0,0 +1,15 @@
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='RepVGG',
+ arch='A0',
+ out_indices=(3, ),
+ ),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=1280,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/repvgg-B3_lbs-mixup_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/repvgg-B3_lbs-mixup_in1k.py
new file mode 100644
index 00000000..5bb07db5
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/repvgg-B3_lbs-mixup_in1k.py
@@ -0,0 +1,23 @@
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='RepVGG',
+ arch='B3',
+ out_indices=(3, ),
+ ),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2560,
+ loss=dict(
+ type='LabelSmoothLoss',
+ loss_weight=1.0,
+ label_smooth_val=0.1,
+ mode='classy_vision',
+ num_classes=1000),
+ topk=(1, 5),
+ ),
+ train_cfg=dict(
+ augments=dict(type='BatchMixup', alpha=0.2, num_classes=1000,
+ prob=1.)))
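
Both RepVGG bases build the backbone with `deploy=False`, the training-time multi-branch form, just as the RepMLP base above pairs `deploy=False` with `reparam_conv_kernels=(1, 3)`. After training, the branches are meant to be fused into single convolutions for inference; in mmcls 0.x this is exposed as a `switch_to_deploy()` on the backbone plus a conversion script under tools/convert_models/ (names recalled from mmcls 0.x, verify against your checkout):

from mmcv import Config
from mmcls.models import build_classifier

cfg = Config.fromfile(
    'openmmlab_test/mmclassification-0.24.1/'
    'configs/_base_/models/repvgg-A0_in1k.py')
model = build_classifier(cfg.model)
model.backbone.switch_to_deploy()  # assumed mmcls 0.x API; fuses branches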
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/res2net101-w26-s4.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/res2net101-w26-s4.py
new file mode 100644
index 00000000..3bf64c50
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/res2net101-w26-s4.py
@@ -0,0 +1,18 @@
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='Res2Net',
+ depth=101,
+ scales=4,
+ base_width=26,
+ deep_stem=False,
+ avg_down=False,
+ ),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2048,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/res2net50-w14-s8.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/res2net50-w14-s8.py
new file mode 100644
index 00000000..5875142c
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/res2net50-w14-s8.py
@@ -0,0 +1,18 @@
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='Res2Net',
+ depth=50,
+ scales=8,
+ base_width=14,
+ deep_stem=False,
+ avg_down=False,
+ ),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2048,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/res2net50-w26-s4.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/res2net50-w26-s4.py
new file mode 100644
index 00000000..be8fdb58
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/res2net50-w26-s4.py
@@ -0,0 +1,18 @@
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='Res2Net',
+ depth=50,
+ scales=4,
+ base_width=26,
+ deep_stem=False,
+ avg_down=False,
+ ),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2048,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/res2net50-w26-s6.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/res2net50-w26-s6.py
new file mode 100644
index 00000000..281b136a
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/res2net50-w26-s6.py
@@ -0,0 +1,18 @@
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='Res2Net',
+ depth=50,
+ scales=6,
+ base_width=26,
+ deep_stem=False,
+ avg_down=False,
+ ),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2048,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/res2net50-w26-s8.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/res2net50-w26-s8.py
new file mode 100644
index 00000000..b4f62f3e
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/res2net50-w26-s8.py
@@ -0,0 +1,18 @@
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='Res2Net',
+ depth=50,
+ scales=8,
+ base_width=26,
+ deep_stem=False,
+ avg_down=False,
+ ),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2048,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/res2net50-w48-s2.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/res2net50-w48-s2.py
new file mode 100644
index 00000000..8675c91f
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/res2net50-w48-s2.py
@@ -0,0 +1,18 @@
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='Res2Net',
+ depth=50,
+ scales=2,
+ base_width=48,
+ deep_stem=False,
+ avg_down=False,
+ ),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2048,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
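
The six Res2Net file names encode the backbone hyperparameters directly as `res2net<depth>-w<base_width>-s<scales>`. A tiny parser makes the mapping explicit:

import re

def res2net_kwargs(name: str) -> dict:
    # e.g. 'res2net50-w14-s8' -> depth=50, base_width=14, scales=8
    depth, width, scales = re.match(
        r'res2net(\d+)-w(\d+)-s(\d+)', name).groups()
    return dict(type='Res2Net', depth=int(depth), base_width=int(width),
                scales=int(scales), deep_stem=False, avg_down=False)

assert res2net_kwargs('res2net50-w26-s4') == dict(
    type='Res2Net', depth=50, base_width=26, scales=4,
    deep_stem=False, avg_down=False)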
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnest101.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnest101.py
new file mode 100644
index 00000000..97f7749c
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnest101.py
@@ -0,0 +1,24 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='ResNeSt',
+ depth=101,
+ num_stages=4,
+ stem_channels=128,
+ out_indices=(3, ),
+ style='pytorch'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2048,
+ loss=dict(
+ type='LabelSmoothLoss',
+ label_smooth_val=0.1,
+ num_classes=1000,
+ reduction='mean',
+ loss_weight=1.0),
+ topk=(1, 5),
+ cal_acc=False))
+train_cfg = dict(mixup=dict(alpha=0.2, num_classes=1000))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnest200.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnest200.py
new file mode 100644
index 00000000..46100178
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnest200.py
@@ -0,0 +1,24 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='ResNeSt',
+ depth=200,
+ num_stages=4,
+ stem_channels=128,
+ out_indices=(3, ),
+ style='pytorch'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2048,
+ loss=dict(
+ type='LabelSmoothLoss',
+ label_smooth_val=0.1,
+ num_classes=1000,
+ reduction='mean',
+ loss_weight=1.0),
+ topk=(1, 5),
+ cal_acc=False))
+train_cfg = dict(mixup=dict(alpha=0.2, num_classes=1000))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnest269.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnest269.py
new file mode 100644
index 00000000..ad365d03
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnest269.py
@@ -0,0 +1,24 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='ResNeSt',
+ depth=269,
+ num_stages=4,
+ stem_channels=128,
+ out_indices=(3, ),
+ style='pytorch'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2048,
+ loss=dict(
+ type='LabelSmoothLoss',
+ label_smooth_val=0.1,
+ num_classes=1000,
+ reduction='mean',
+ loss_weight=1.0),
+ topk=(1, 5),
+ cal_acc=False))
+train_cfg = dict(mixup=dict(alpha=0.2, num_classes=1000))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnest50.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnest50.py
new file mode 100644
index 00000000..15269d4a
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnest50.py
@@ -0,0 +1,23 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='ResNeSt',
+ depth=50,
+ num_stages=4,
+ out_indices=(3, ),
+ style='pytorch'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2048,
+ loss=dict(
+ type='LabelSmoothLoss',
+ label_smooth_val=0.1,
+ num_classes=1000,
+ reduction='mean',
+ loss_weight=1.0),
+ topk=(1, 5),
+ cal_acc=False))
+train_cfg = dict(mixup=dict(alpha=0.2, num_classes=1000))
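
One wrinkle in the four ResNeSt bases: `train_cfg` sits at module level, outside `model`, which appears to be a holdover from an older config convention, whereas the HorNet, MViT, and Swin bases in this same patch nest their Mixup/CutMix augments inside `model.train_cfg`. Assuming equivalent semantics, the ResNeSt mixup expressed in the newer layout would read:

model = dict(
    # ... backbone/neck/head exactly as in resnest50.py above ...
    train_cfg=dict(augments=[
        dict(type='BatchMixup', alpha=0.2, num_classes=1000, prob=1.)
    ]))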
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnet101.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnet101.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnet101.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnet101.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnet101_cifar.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnet101_cifar.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnet101_cifar.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnet101_cifar.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnet152.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnet152.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnet152.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnet152.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnet152_cifar.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnet152_cifar.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnet152_cifar.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnet152_cifar.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnet18.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnet18.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnet18.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnet18.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnet18_cifar.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnet18_cifar.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnet18_cifar.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnet18_cifar.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnet34.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnet34.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnet34.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnet34.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnet34_cifar.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnet34_cifar.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnet34_cifar.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnet34_cifar.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnet34_gem.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnet34_gem.py
new file mode 100644
index 00000000..5c0e0d3e
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnet34_gem.py
@@ -0,0 +1,17 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='ResNet',
+ depth=34,
+ num_stages=4,
+ out_indices=(3, ),
+ style='pytorch'),
+ neck=dict(type='GeneralizedMeanPooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=512,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
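
resnet34_gem.py is the only base in this patch whose neck is `GeneralizedMeanPooling` (GeM) rather than plain GAP. GeM computes (mean(x**p))**(1/p) with a learnable exponent p, reducing to global average pooling at p=1 and approaching max pooling as p grows. A self-contained illustration of that relationship (not the mmcls implementation):

import torch

def gem(x: torch.Tensor, p: float = 3.0, eps: float = 1e-6) -> torch.Tensor:
    # x: (N, C, H, W) -> (N, C)
    return x.clamp(min=eps).pow(p).mean(dim=(-2, -1)).pow(1.0 / p)

x = torch.rand(2, 512, 7, 7)
assert torch.allclose(gem(x, p=1.0), x.mean(dim=(-2, -1)), atol=1e-5)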
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnet50.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnet50.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnet50.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnet50.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnet50_cifar.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnet50_cifar.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnet50_cifar.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnet50_cifar.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnet50_cifar_cutmix.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnet50_cifar_cutmix.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnet50_cifar_cutmix.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnet50_cifar_cutmix.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnet50_cifar_mixup.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnet50_cifar_mixup.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnet50_cifar_mixup.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnet50_cifar_mixup.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnet50_cutmix.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnet50_cutmix.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnet50_cutmix.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnet50_cutmix.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnet50_label_smooth.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnet50_label_smooth.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnet50_label_smooth.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnet50_label_smooth.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnet50_mixup.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnet50_mixup.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnet50_mixup.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnet50_mixup.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnetv1c50.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnetv1c50.py
new file mode 100644
index 00000000..3b973e20
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnetv1c50.py
@@ -0,0 +1,17 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='ResNetV1c',
+ depth=50,
+ num_stages=4,
+ out_indices=(3, ),
+ style='pytorch'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2048,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnetv1d101.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnetv1d101.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnetv1d101.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnetv1d101.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnetv1d152.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnetv1d152.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnetv1d152.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnetv1d152.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnetv1d50.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnetv1d50.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnetv1d50.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnetv1d50.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnext101_32x4d.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnext101_32x4d.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnext101_32x4d.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnext101_32x4d.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnext101_32x8d.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnext101_32x8d.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnext101_32x8d.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnext101_32x8d.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnext152_32x4d.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnext152_32x4d.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnext152_32x4d.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnext152_32x4d.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnext50_32x4d.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnext50_32x4d.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/resnext50_32x4d.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/resnext50_32x4d.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/seresnet101.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/seresnet101.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/seresnet101.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/seresnet101.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/seresnet50.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/seresnet50.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/seresnet50.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/seresnet50.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/seresnext101_32x4d.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/seresnext101_32x4d.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/seresnext101_32x4d.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/seresnext101_32x4d.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/seresnext50_32x4d.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/seresnext50_32x4d.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/seresnext50_32x4d.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/seresnext50_32x4d.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/shufflenet_v1_1x.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/shufflenet_v1_1x.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/shufflenet_v1_1x.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/shufflenet_v1_1x.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/shufflenet_v2_1x.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/shufflenet_v2_1x.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/shufflenet_v2_1x.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/shufflenet_v2_1x.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer/base_224.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer/base_224.py
new file mode 100644
index 00000000..e16b4e60
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer/base_224.py
@@ -0,0 +1,22 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='SwinTransformer', arch='base', img_size=224, drop_path_rate=0.5),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=1024,
+ init_cfg=None, # suppress the default init_cfg of LinearClsHead.
+ loss=dict(
+ type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
+ cal_acc=False),
+ init_cfg=[
+ dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.),
+ dict(type='Constant', layer='LayerNorm', val=1., bias=0.)
+ ],
+ train_cfg=dict(augments=[
+ dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5),
+ dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
+ ]))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer/base_384.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer/base_384.py
new file mode 100644
index 00000000..ce78981f
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer/base_384.py
@@ -0,0 +1,16 @@
+# model settings
+# Only for evaluation
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='SwinTransformer',
+ arch='base',
+ img_size=384,
+ stage_cfgs=dict(block_cfgs=dict(window_size=12))),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=1024,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5)))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer/large_224.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer/large_224.py
new file mode 100644
index 00000000..747d00e4
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer/large_224.py
@@ -0,0 +1,12 @@
+# model settings
+# Only for evaluation
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='SwinTransformer', arch='large', img_size=224),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=1536,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5)))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer/large_384.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer/large_384.py
new file mode 100644
index 00000000..7026f81a
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer/large_384.py
@@ -0,0 +1,16 @@
+# model settings
+# Only for evaluation
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='SwinTransformer',
+ arch='large',
+ img_size=384,
+ stage_cfgs=dict(block_cfgs=dict(window_size=12))),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=1536,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5)))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer/small_224.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer/small_224.py
new file mode 100644
index 00000000..78739866
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer/small_224.py
@@ -0,0 +1,23 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='SwinTransformer', arch='small', img_size=224,
+ drop_path_rate=0.3),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=768,
+ init_cfg=None, # suppress the default init_cfg of LinearClsHead.
+ loss=dict(
+ type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
+ cal_acc=False),
+ init_cfg=[
+ dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.),
+ dict(type='Constant', layer='LayerNorm', val=1., bias=0.)
+ ],
+ train_cfg=dict(augments=[
+ dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5),
+ dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
+ ]))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer/tiny_224.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer/tiny_224.py
new file mode 100644
index 00000000..2d68d66b
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer/tiny_224.py
@@ -0,0 +1,22 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='SwinTransformer', arch='tiny', img_size=224, drop_path_rate=0.2),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=768,
+ init_cfg=None, # suppress the default init_cfg of LinearClsHead.
+ loss=dict(
+ type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
+ cal_acc=False),
+ init_cfg=[
+ dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.),
+ dict(type='Constant', layer='LayerNorm', val=1., bias=0.)
+ ],
+ train_cfg=dict(augments=[
+ dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5),
+ dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
+ ]))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer_v2/base_256.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer_v2/base_256.py
new file mode 100644
index 00000000..f711a9c8
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer_v2/base_256.py
@@ -0,0 +1,25 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='SwinTransformerV2',
+ arch='base',
+ img_size=256,
+ drop_path_rate=0.5),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=1024,
+ init_cfg=None, # suppress the default init_cfg of LinearClsHead.
+ loss=dict(
+ type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
+ cal_acc=False),
+ init_cfg=[
+ dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.),
+ dict(type='Constant', layer='LayerNorm', val=1., bias=0.)
+ ],
+ train_cfg=dict(augments=[
+ dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5),
+ dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
+ ]))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer_v2/base_384.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer_v2/base_384.py
new file mode 100644
index 00000000..5fb9aead
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer_v2/base_384.py
@@ -0,0 +1,17 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='SwinTransformerV2',
+ arch='base',
+ img_size=384,
+ drop_path_rate=0.2),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=1024,
+ init_cfg=None, # suppress the default init_cfg of LinearClsHead.
+ loss=dict(
+ type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
+ cal_acc=False))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer_v2/large_256.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer_v2/large_256.py
new file mode 100644
index 00000000..fe557c32
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer_v2/large_256.py
@@ -0,0 +1,16 @@
+# model settings
+# Only for evaluation
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='SwinTransformerV2',
+ arch='large',
+ img_size=256,
+ drop_path_rate=0.2),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=1536,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5)))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer_v2/large_384.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer_v2/large_384.py
new file mode 100644
index 00000000..a626c407
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer_v2/large_384.py
@@ -0,0 +1,16 @@
+# model settings
+# Only for evaluation
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='SwinTransformerV2',
+ arch='large',
+ img_size=384,
+ drop_path_rate=0.2),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=1536,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5)))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer_v2/small_256.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer_v2/small_256.py
new file mode 100644
index 00000000..8808f097
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer_v2/small_256.py
@@ -0,0 +1,25 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='SwinTransformerV2',
+ arch='small',
+ img_size=256,
+ drop_path_rate=0.3),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=768,
+ init_cfg=None, # suppress the default init_cfg of LinearClsHead.
+ loss=dict(
+ type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
+ cal_acc=False),
+ init_cfg=[
+ dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.),
+ dict(type='Constant', layer='LayerNorm', val=1., bias=0.)
+ ],
+ train_cfg=dict(augments=[
+ dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5),
+ dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
+ ]))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer_v2/tiny_256.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer_v2/tiny_256.py
new file mode 100644
index 00000000..d40e3946
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/swin_transformer_v2/tiny_256.py
@@ -0,0 +1,25 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='SwinTransformerV2',
+ arch='tiny',
+ img_size=256,
+ drop_path_rate=0.2),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=768,
+ init_cfg=None, # suppress the default init_cfg of LinearClsHead.
+ loss=dict(
+ type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
+ cal_acc=False),
+ init_cfg=[
+ dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.),
+ dict(type='Constant', layer='LayerNorm', val=1., bias=0.)
+ ],
+ train_cfg=dict(augments=[
+ dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5),
+ dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
+ ]))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/t2t-vit-t-14.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/t2t-vit-t-14.py
new file mode 100644
index 00000000..91dbb676
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/t2t-vit-t-14.py
@@ -0,0 +1,41 @@
+# model settings
+embed_dims = 384
+num_classes = 1000
+
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='T2T_ViT',
+ img_size=224,
+ in_channels=3,
+ embed_dims=embed_dims,
+ t2t_cfg=dict(
+ token_dims=64,
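+ # The "-t" variants use plain transformer layers in the T2T module instead of Performer attention.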
+ use_performer=False,
+ ),
+ num_layers=14,
+ layer_cfgs=dict(
+ num_heads=6,
+ feedforward_channels=3 * embed_dims, # mlp_ratio = 3
+ ),
+ drop_path_rate=0.1,
+ init_cfg=[
+ dict(type='TruncNormal', layer='Linear', std=.02),
+ dict(type='Constant', layer='LayerNorm', val=1., bias=0.),
+ ]),
+ neck=None,
+ head=dict(
+ type='VisionTransformerClsHead',
+ num_classes=num_classes,
+ in_channels=embed_dims,
+ loss=dict(
+ type='LabelSmoothLoss',
+ label_smooth_val=0.1,
+ mode='original',
+ ),
+ topk=(1, 5),
+ init_cfg=dict(type='TruncNormal', layer='Linear', std=.02)),
+ train_cfg=dict(augments=[
+ dict(type='BatchMixup', alpha=0.8, prob=0.5, num_classes=num_classes),
+ dict(type='BatchCutMix', alpha=1.0, prob=0.5, num_classes=num_classes),
+ ]))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/t2t-vit-t-19.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/t2t-vit-t-19.py
new file mode 100644
index 00000000..8ab139d6
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/t2t-vit-t-19.py
@@ -0,0 +1,41 @@
+# model settings
+embed_dims = 448
+num_classes = 1000
+
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='T2T_ViT',
+ img_size=224,
+ in_channels=3,
+ embed_dims=embed_dims,
+ t2t_cfg=dict(
+ token_dims=64,
+ use_performer=False,
+ ),
+ num_layers=19,
+ layer_cfgs=dict(
+ num_heads=7,
+ feedforward_channels=3 * embed_dims, # mlp_ratio = 3
+ ),
+ drop_path_rate=0.1,
+ init_cfg=[
+ dict(type='TruncNormal', layer='Linear', std=.02),
+ dict(type='Constant', layer='LayerNorm', val=1., bias=0.),
+ ]),
+ neck=None,
+ head=dict(
+ type='VisionTransformerClsHead',
+ num_classes=num_classes,
+ in_channels=embed_dims,
+ loss=dict(
+ type='LabelSmoothLoss',
+ label_smooth_val=0.1,
+ mode='original',
+ ),
+ topk=(1, 5),
+ init_cfg=dict(type='TruncNormal', layer='Linear', std=.02)),
+ train_cfg=dict(augments=[
+ dict(type='BatchMixup', alpha=0.8, prob=0.5, num_classes=num_classes),
+ dict(type='BatchCutMix', alpha=1.0, prob=0.5, num_classes=num_classes),
+ ]))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/t2t-vit-t-24.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/t2t-vit-t-24.py
new file mode 100644
index 00000000..5990960a
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/t2t-vit-t-24.py
@@ -0,0 +1,41 @@
+# model settings
+embed_dims = 512
+num_classes = 1000
+
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='T2T_ViT',
+ img_size=224,
+ in_channels=3,
+ embed_dims=embed_dims,
+ t2t_cfg=dict(
+ token_dims=64,
+ use_performer=False,
+ ),
+ num_layers=24,
+ layer_cfgs=dict(
+ num_heads=8,
+ feedforward_channels=3 * embed_dims, # mlp_ratio = 3
+ ),
+ drop_path_rate=0.1,
+ init_cfg=[
+ dict(type='TruncNormal', layer='Linear', std=.02),
+ dict(type='Constant', layer='LayerNorm', val=1., bias=0.),
+ ]),
+ neck=None,
+ head=dict(
+ type='VisionTransformerClsHead',
+ num_classes=num_classes,
+ in_channels=embed_dims,
+ loss=dict(
+ type='LabelSmoothLoss',
+ label_smooth_val=0.1,
+ mode='original',
+ ),
+ topk=(1, 5),
+ init_cfg=dict(type='TruncNormal', layer='Linear', std=.02)),
+ train_cfg=dict(augments=[
+ dict(type='BatchMixup', alpha=0.8, prob=0.5, num_classes=num_classes),
+ dict(type='BatchCutMix', alpha=1.0, prob=0.5, num_classes=num_classes),
+ ]))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/tnt_s_patch16_224.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/tnt_s_patch16_224.py
new file mode 100644
index 00000000..5e13d078
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/tnt_s_patch16_224.py
@@ -0,0 +1,29 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='TNT',
+ arch='s',
+ img_size=224,
+ patch_size=16,
+ in_channels=3,
+ ffn_ratio=4,
+ qkv_bias=False,
+ drop_rate=0.,
+ attn_drop_rate=0.,
+ drop_path_rate=0.1,
+ first_stride=4,
+ num_fcs=2,
+ init_cfg=[
+ dict(type='TruncNormal', layer='Linear', std=.02),
+ dict(type='Constant', layer='LayerNorm', val=1., bias=0.)
+ ]),
+ neck=None,
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=384,
+ loss=dict(
+ type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
+ topk=(1, 5),
+ init_cfg=dict(type='TruncNormal', layer='Linear', std=.02)))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/twins_pcpvt_base.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/twins_pcpvt_base.py
new file mode 100644
index 00000000..473d7ee8
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/twins_pcpvt_base.py
@@ -0,0 +1,30 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='PCPVT',
+ arch='base',
+ in_channels=3,
+ out_indices=(3, ),
+ qkv_bias=True,
+ norm_cfg=dict(type='LN', eps=1e-06),
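+ # Append a LayerNorm only after the final stage, whose output feeds the GAP neck.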
+ norm_after_stage=[False, False, False, True],
+ drop_rate=0.0,
+ attn_drop_rate=0.,
+ drop_path_rate=0.3),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=512,
+ loss=dict(
+ type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
+ cal_acc=False),
+ init_cfg=[
+ dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.),
+ dict(type='Constant', layer='LayerNorm', val=1., bias=0.)
+ ],
+ train_cfg=dict(augments=[
+ dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5),
+ dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
+ ]))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/twins_svt_base.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/twins_svt_base.py
new file mode 100644
index 00000000..cabd3739
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/twins_svt_base.py
@@ -0,0 +1,30 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='SVT',
+ arch='base',
+ in_channels=3,
+ out_indices=(3, ),
+ qkv_bias=True,
+ norm_cfg=dict(type='LN'),
+ norm_after_stage=[False, False, False, True],
+ drop_rate=0.0,
+ attn_drop_rate=0.,
+ drop_path_rate=0.3),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=768,
+ loss=dict(
+ type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
+ cal_acc=False),
+ init_cfg=[
+ dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.),
+ dict(type='Constant', layer='LayerNorm', val=1., bias=0.)
+ ],
+ train_cfg=dict(augments=[
+ dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5),
+ dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
+ ]))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/van/van_b0.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/van/van_b0.py
new file mode 100644
index 00000000..5fa977e7
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/van/van_b0.py
@@ -0,0 +1,21 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='VAN', arch='b0', drop_path_rate=0.1),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=256,
+ init_cfg=None, # suppress the default init_cfg of LinearClsHead.
+ loss=dict(
+ type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
+ cal_acc=False),
+ init_cfg=[
+ dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.),
+ dict(type='Constant', layer='LayerNorm', val=1., bias=0.)
+ ],
+ train_cfg=dict(augments=[
+ dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5),
+ dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
+ ]))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/van/van_b1.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/van/van_b1.py
new file mode 100644
index 00000000..a27a50b1
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/van/van_b1.py
@@ -0,0 +1,21 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='VAN', arch='b1', drop_path_rate=0.1),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=512,
+ init_cfg=None, # suppress the default init_cfg of LinearClsHead.
+ loss=dict(
+ type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
+ cal_acc=False),
+ init_cfg=[
+ dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.),
+ dict(type='Constant', layer='LayerNorm', val=1., bias=0.)
+ ],
+ train_cfg=dict(augments=[
+ dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5),
+ dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
+ ]))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/van/van_b2.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/van/van_b2.py
new file mode 100644
index 00000000..41b0484f
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/van/van_b2.py
@@ -0,0 +1,13 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='VAN', arch='b2', drop_path_rate=0.1),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=512,
+ init_cfg=None, # suppress the default init_cfg of LinearClsHead.
+ loss=dict(
+ type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
+ cal_acc=False))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/van/van_b3.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/van/van_b3.py
new file mode 100644
index 00000000..d32b12cc
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/van/van_b3.py
@@ -0,0 +1,13 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='VAN', arch='b3', drop_path_rate=0.2),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=512,
+ init_cfg=None, # suppress the default init_cfg of LinearClsHead.
+ loss=dict(
+ type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
+ cal_acc=False))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/van/van_b4.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/van/van_b4.py
new file mode 100644
index 00000000..417835c9
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/van/van_b4.py
@@ -0,0 +1,13 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='VAN', arch='b4', drop_path_rate=0.2),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=512,
+ init_cfg=None, # suppress the default init_cfg of LinearClsHead.
+ loss=dict(
+ type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
+ cal_acc=False))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/van/van_b5.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/van/van_b5.py
new file mode 100644
index 00000000..fe8b9236
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/van/van_b5.py
@@ -0,0 +1,13 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='VAN', arch='b5', drop_path_rate=0.2),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=768,
+ init_cfg=None, # suppress the default init_cfg of LinearClsHead.
+ loss=dict(
+ type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
+ cal_acc=False))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/van/van_b6.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/van/van_b6.py
new file mode 100644
index 00000000..a0dfb3c7
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/van/van_b6.py
@@ -0,0 +1,13 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='VAN', arch='b6', drop_path_rate=0.3),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=768,
+ init_cfg=None, # suppress the default init_cfg of LinearClsHead.
+ loss=dict(
+ type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
+ cal_acc=False))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/van/van_base.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/van/van_base.py
new file mode 100644
index 00000000..5c2bcf0e
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/van/van_base.py
@@ -0,0 +1 @@
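+# Compatibility alias: VAN-Base is the same model as VAN-B2 in the b0-b6 naming scheme.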
+_base_ = ['./van_b2.py']
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/van/van_large.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/van/van_large.py
new file mode 100644
index 00000000..bc9536c6
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/van/van_large.py
@@ -0,0 +1 @@
+_base_ = ['./van_b3.py']
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/van/van_small.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/van/van_small.py
new file mode 100644
index 00000000..3973c22a
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/van/van_small.py
@@ -0,0 +1 @@
+_base_ = ['./van_b1.py']
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/van/van_tiny.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/van/van_tiny.py
new file mode 100644
index 00000000..ace9ebbb
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/van/van_tiny.py
@@ -0,0 +1 @@
+_base_ = ['./van_b0.py']
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/vgg11.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/vgg11.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/vgg11.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/vgg11.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/vgg11bn.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/vgg11bn.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/vgg11bn.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/vgg11bn.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/vgg13.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/vgg13.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/vgg13.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/vgg13.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/vgg13bn.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/vgg13bn.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/vgg13bn.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/vgg13bn.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/vgg16.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/vgg16.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/vgg16.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/vgg16.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/vgg16bn.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/vgg16bn.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/vgg16bn.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/vgg16bn.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/vgg19.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/vgg19.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/vgg19.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/vgg19.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/vgg19bn.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/vgg19bn.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/models/vgg19bn.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/models/vgg19bn.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/vit-base-p16.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/vit-base-p16.py
new file mode 100644
index 00000000..bb42bed5
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/vit-base-p16.py
@@ -0,0 +1,25 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='VisionTransformer',
+ arch='b',
+ img_size=224,
+ patch_size=16,
+ drop_rate=0.1,
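+ # Kaiming fan-in init targets the Conv2d patch-embedding projection.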
+ init_cfg=[
+ dict(
+ type='Kaiming',
+ layer='Conv2d',
+ mode='fan_in',
+ nonlinearity='linear')
+ ]),
+ neck=None,
+ head=dict(
+ type='VisionTransformerClsHead',
+ num_classes=1000,
+ in_channels=768,
+ loss=dict(
+ type='LabelSmoothLoss', label_smooth_val=0.1,
+ mode='classy_vision'),
+ ))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/vit-base-p32.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/vit-base-p32.py
new file mode 100644
index 00000000..ad550ef9
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/vit-base-p32.py
@@ -0,0 +1,24 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='VisionTransformer',
+ arch='b',
+ img_size=224,
+ patch_size=32,
+ drop_rate=0.1,
+ init_cfg=[
+ dict(
+ type='Kaiming',
+ layer='Conv2d',
+ mode='fan_in',
+ nonlinearity='linear')
+ ]),
+ neck=None,
+ head=dict(
+ type='VisionTransformerClsHead',
+ num_classes=1000,
+ in_channels=768,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/vit-large-p16.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/vit-large-p16.py
new file mode 100644
index 00000000..97162304
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/vit-large-p16.py
@@ -0,0 +1,24 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='VisionTransformer',
+ arch='l',
+ img_size=224,
+ patch_size=16,
+ drop_rate=0.1,
+ init_cfg=[
+ dict(
+ type='Kaiming',
+ layer='Conv2d',
+ mode='fan_in',
+ nonlinearity='linear')
+ ]),
+ neck=None,
+ head=dict(
+ type='VisionTransformerClsHead',
+ num_classes=1000,
+ in_channels=1024,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/vit-large-p32.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/vit-large-p32.py
new file mode 100644
index 00000000..f9491bb5
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/vit-large-p32.py
@@ -0,0 +1,24 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='VisionTransformer',
+ arch='l',
+ img_size=224,
+ patch_size=32,
+ drop_rate=0.1,
+ init_cfg=[
+ dict(
+ type='Kaiming',
+ layer='Conv2d',
+ mode='fan_in',
+ nonlinearity='linear')
+ ]),
+ neck=None,
+ head=dict(
+ type='VisionTransformerClsHead',
+ num_classes=1000,
+ in_channels=1024,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/wide-resnet50.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/wide-resnet50.py
new file mode 100644
index 00000000..a2913b9a
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/models/wide-resnet50.py
@@ -0,0 +1,20 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='ResNet',
+ depth=50,
+ num_stages=4,
+ out_indices=(3, ),
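+ # Wide-ResNet-50-2: widen all stages (base_channels 64 -> 128) and halve the
+ # bottleneck expansion (4 -> 2), so the last stage still outputs 2048 channels.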
+ stem_channels=64,
+ base_channels=128,
+ expansion=2,
+ style='pytorch'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2048,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/schedules/cifar10_bs128.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/cifar10_bs128.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/schedules/cifar10_bs128.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/cifar10_bs128.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/cub_bs64.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/cub_bs64.py
new file mode 100644
index 00000000..93cce6a7
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/cub_bs64.py
@@ -0,0 +1,13 @@
+# optimizer
+optimizer = dict(
+ type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005, nesterov=True)
+optimizer_config = dict(grad_clip=None)
+# learning policy
+lr_config = dict(
+ policy='CosineAnnealing',
+ min_lr=0,
+ warmup='linear',
+ warmup_iters=5,
+ warmup_ratio=0.01,
+ warmup_by_epoch=True)
+runner = dict(type='EpochBasedRunner', max_epochs=100)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/imagenet_bs1024_adamw_conformer.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/imagenet_bs1024_adamw_conformer.py
new file mode 100644
index 00000000..92f18017
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/imagenet_bs1024_adamw_conformer.py
@@ -0,0 +1,29 @@
+paramwise_cfg = dict(
+ norm_decay_mult=0.0,
+ bias_decay_mult=0.0,
+ custom_keys={
+ '.cls_token': dict(decay_mult=0.0),
+ })
+
+# With a batch size of 128 per GPU on 8 GPUs (total batch 1024):
+# lr = 5e-4 * 128 * 8 / 512 = 0.001
+optimizer = dict(
+ type='AdamW',
+ lr=5e-4 * 128 * 8 / 512,
+ weight_decay=0.05,
+ eps=1e-8,
+ betas=(0.9, 0.999),
+ paramwise_cfg=paramwise_cfg)
+optimizer_config = dict(grad_clip=None)
+
+# learning policy
+lr_config = dict(
+ policy='CosineAnnealing',
+ by_epoch=False,
+ min_lr_ratio=1e-2,
+ warmup='linear',
+ warmup_ratio=1e-3,
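+ # ImageNet-1k at total batch 1024 gives ~1252 iters per epoch, so this warms up for 5 epochs.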
+ warmup_iters=5 * 1252,
+ warmup_by_epoch=False)
+
+runner = dict(type='EpochBasedRunner', max_epochs=300)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/imagenet_bs1024_adamw_swin.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/imagenet_bs1024_adamw_swin.py
new file mode 100644
index 00000000..2ad035cb
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/imagenet_bs1024_adamw_swin.py
@@ -0,0 +1,30 @@
+paramwise_cfg = dict(
+ norm_decay_mult=0.0,
+ bias_decay_mult=0.0,
+ custom_keys={
+ '.absolute_pos_embed': dict(decay_mult=0.0),
+ '.relative_position_bias_table': dict(decay_mult=0.0)
+ })
+
+# With a batch size of 128 per GPU on 8 GPUs (total batch 1024):
+# lr = 5e-4 * 128 * 8 / 512 = 0.001
+optimizer = dict(
+ type='AdamW',
+ lr=5e-4 * 1024 / 512,
+ weight_decay=0.05,
+ eps=1e-8,
+ betas=(0.9, 0.999),
+ paramwise_cfg=paramwise_cfg)
+optimizer_config = dict(grad_clip=dict(max_norm=5.0))
+
+# learning policy
+lr_config = dict(
+ policy='CosineAnnealing',
+ by_epoch=False,
+ min_lr_ratio=1e-2,
+ warmup='linear',
+ warmup_ratio=1e-3,
+ warmup_iters=20,
+ warmup_by_epoch=True)
+
+runner = dict(type='EpochBasedRunner', max_epochs=300)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/imagenet_bs1024_coslr.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/imagenet_bs1024_coslr.py
new file mode 100644
index 00000000..ee84e7a6
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/imagenet_bs1024_coslr.py
@@ -0,0 +1,12 @@
+# optimizer
+optimizer = dict(type='SGD', lr=0.8, momentum=0.9, weight_decay=5e-5)
+optimizer_config = dict(grad_clip=None)
+# learning policy
+lr_config = dict(
+ policy='CosineAnnealing',
+ min_lr=0,
+ warmup='linear',
+ warmup_iters=5,
+ warmup_ratio=0.1,
+ warmup_by_epoch=True)
+runner = dict(type='EpochBasedRunner', max_epochs=100)
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/schedules/imagenet_bs1024_linearlr_bn_nowd.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/imagenet_bs1024_linearlr_bn_nowd.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/schedules/imagenet_bs1024_linearlr_bn_nowd.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/imagenet_bs1024_linearlr_bn_nowd.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/schedules/imagenet_bs2048.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/imagenet_bs2048.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/schedules/imagenet_bs2048.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/imagenet_bs2048.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/schedules/imagenet_bs2048_AdamW.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/imagenet_bs2048_AdamW.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/schedules/imagenet_bs2048_AdamW.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/imagenet_bs2048_AdamW.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/schedules/imagenet_bs2048_coslr.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/imagenet_bs2048_coslr.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/schedules/imagenet_bs2048_coslr.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/imagenet_bs2048_coslr.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/imagenet_bs2048_rsb.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/imagenet_bs2048_rsb.py
new file mode 100644
index 00000000..e021cb0f
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/imagenet_bs2048_rsb.py
@@ -0,0 +1,12 @@
+# optimizer
+optimizer = dict(type='Lamb', lr=0.005, weight_decay=0.02)
+optimizer_config = dict(grad_clip=None)
+# learning policy
+lr_config = dict(
+ policy='CosineAnnealing',
+ min_lr=1.0e-6,
+ warmup='linear',
+ # ImageNet-1k at total batch 2048 gives ~626 iters per epoch; warm up for 5 epochs.
+ warmup_iters=5 * 626,
+ warmup_ratio=0.0001)
+runner = dict(type='EpochBasedRunner', max_epochs=100)
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/schedules/imagenet_bs256.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/imagenet_bs256.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/schedules/imagenet_bs256.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/imagenet_bs256.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/schedules/imagenet_bs256_140e.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/imagenet_bs256_140e.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/schedules/imagenet_bs256_140e.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/imagenet_bs256_140e.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/imagenet_bs256_200e_coslr_warmup.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/imagenet_bs256_200e_coslr_warmup.py
new file mode 100644
index 00000000..49456b2c
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/imagenet_bs256_200e_coslr_warmup.py
@@ -0,0 +1,11 @@
+# optimizer
+optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001)
+optimizer_config = dict(grad_clip=None)
+# learning policy
+lr_config = dict(
+ policy='CosineAnnealing',
+ min_lr=0,
+ warmup='linear',
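+ # ImageNet-1k at batch 256 gives ~5005 iters per epoch; 25025 iters = 5 warmup epochs.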
+ warmup_iters=25025,
+ warmup_ratio=0.25)
+runner = dict(type='EpochBasedRunner', max_epochs=200)
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/schedules/imagenet_bs256_coslr.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/imagenet_bs256_coslr.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/schedules/imagenet_bs256_coslr.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/imagenet_bs256_coslr.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/_base_/schedules/imagenet_bs256_epochstep.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/imagenet_bs256_epochstep.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/_base_/schedules/imagenet_bs256_epochstep.py
rename to openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/imagenet_bs256_epochstep.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/imagenet_bs4096_AdamW.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/imagenet_bs4096_AdamW.py
new file mode 100644
index 00000000..75b00d80
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/imagenet_bs4096_AdamW.py
@@ -0,0 +1,24 @@
+# Specific to ViT pretraining: exempt the class token and position embedding from weight decay.
+paramwise_cfg = dict(custom_keys={
+ '.cls_token': dict(decay_mult=0.0),
+ '.pos_embed': dict(decay_mult=0.0)
+})
+
+# optimizer
+optimizer = dict(
+ type='AdamW',
+ lr=0.003,
+ weight_decay=0.3,
+ paramwise_cfg=paramwise_cfg,
+)
+optimizer_config = dict(grad_clip=dict(max_norm=1.0))
+
+# learning policy
+lr_config = dict(
+ policy='CosineAnnealing',
+ min_lr=0,
+ warmup='linear',
+ warmup_iters=10000,
+ warmup_ratio=1e-4,
+)
+runner = dict(type='EpochBasedRunner', max_epochs=300)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/stanford_cars_bs8.py b/openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/stanford_cars_bs8.py
new file mode 100644
index 00000000..dee252ec
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/_base_/schedules/stanford_cars_bs8.py
@@ -0,0 +1,7 @@
+# optimizer
+optimizer = dict(
+ type='SGD', lr=0.003, momentum=0.9, weight_decay=0.0005, nesterov=True)
+optimizer_config = dict(grad_clip=None)
+# learning policy
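+# Step decay: the LR is multiplied by the default gamma of 0.1 at epochs 40, 70 and 90.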
+lr_config = dict(policy='step', step=[40, 70, 90])
+runner = dict(type='EpochBasedRunner', max_epochs=100)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/conformer/README.md b/openmmlab_test/mmclassification-0.24.1/configs/conformer/README.md
new file mode 100644
index 00000000..5b7d96b7
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/conformer/README.md
@@ -0,0 +1,37 @@
+# Conformer
+
+> [Conformer: Local Features Coupling Global Representations for Visual Recognition](https://arxiv.org/abs/2105.03889)
+
+
+
+## Abstract
+
+Within Convolutional Neural Network (CNN), the convolution operations are good at extracting local features but experience difficulty to capture global representations. Within visual transformer, the cascaded self-attention modules can capture long-distance feature dependencies but unfortunately deteriorate local feature details. In this paper, we propose a hybrid network structure, termed Conformer, to take advantage of convolutional operations and self-attention mechanisms for enhanced representation learning. Conformer roots in the Feature Coupling Unit (FCU), which fuses local features and global representations under different resolutions in an interactive fashion. Conformer adopts a concurrent structure so that local features and global representations are retained to the maximum extent. Experiments show that Conformer, under the comparable parameter complexity, outperforms the visual transformer (DeiT-B) by 2.3% on ImageNet. On MSCOCO, it outperforms ResNet-101 by 3.7% and 3.6% mAPs for object detection and instance segmentation, respectively, demonstrating the great potential to be a general backbone network.
+
+
+
+
+
+## Results and models
+
+### ImageNet-1k
+
+| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+| :-------------------: | :-------: | :------: | :-------: | :-------: | :---------------------------------------------------------------------: | :-----------------------------------------------------------------------: |
+| Conformer-tiny-p16\* | 23.52 | 4.90 | 81.31 | 95.60 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/conformer/conformer-tiny-p16_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/conformer/conformer-tiny-p16_3rdparty_8xb128_in1k_20211206-f6860372.pth) |
+| Conformer-small-p32\* | 38.85 | 7.09 | 81.96 | 96.02 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/conformer/conformer-small-p32_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/conformer/conformer-small-p32_8xb128_in1k_20211206-947a0816.pth) |
+| Conformer-small-p16\* | 37.67 | 10.31 | 83.32 | 96.46 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/conformer/conformer-small-p16_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/conformer/conformer-small-p16_3rdparty_8xb128_in1k_20211206-3065dcf5.pth) |
+| Conformer-base-p16\* | 83.29 | 22.89 | 83.82 | 96.59 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/conformer/conformer-base-p16_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/conformer/conformer-base-p16_3rdparty_8xb128_in1k_20211206-bfdf8637.pth) |
+
+*Models marked with \* are converted from the [official repo](https://github.com/pengzhiliang/Conformer). These config files are intended for validation only; we do not guarantee their training accuracy, and reproduction results are welcome as contributions.*
+
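+## How to use
+
+A minimal single-image inference sketch with the MMClassification Python API (a sketch only, assuming `mmcls` 0.24.1 is installed; the checkpoint filename is the one linked in the table above and the image path is illustrative):
+
+```python
+from mmcls.apis import inference_model, init_model
+
+# Build Conformer-tiny from this config and load the converted weights
+# (download the .pth from the table above first).
+model = init_model(
+    'configs/conformer/conformer-tiny-p16_8xb128_in1k.py',
+    'conformer-tiny-p16_3rdparty_8xb128_in1k_20211206-f6860372.pth',
+    device='cuda:0')
+
+# Single-image inference; returns a dict with 'pred_label', 'pred_score' and 'pred_class'.
+result = inference_model(model, 'demo/demo.JPEG')
+print(result)
+```
+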
+## Citation
+
+```bibtex
+@article{peng2021conformer,
+ title={Conformer: Local Features Coupling Global Representations for Visual Recognition},
+ author={Zhiliang Peng and Wei Huang and Shanzhi Gu and Lingxi Xie and Yaowei Wang and Jianbin Jiao and Qixiang Ye},
+ journal={arXiv preprint arXiv:2105.03889},
+ year={2021},
+}
+```
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/conformer/conformer-base-p16_8xb128_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/conformer/conformer-base-p16_8xb128_in1k.py
new file mode 100644
index 00000000..29ed58be
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/conformer/conformer-base-p16_8xb128_in1k.py
@@ -0,0 +1,9 @@
+_base_ = [
+ '../_base_/models/conformer/base-p16.py',
+ '../_base_/datasets/imagenet_bs64_swin_224.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_conformer.py',
+ '../_base_/default_runtime.py'
+]
+
+data = dict(samples_per_gpu=128)
+evaluation = dict(interval=1, metric='accuracy')
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/conformer/conformer-small-p16_8xb128_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/conformer/conformer-small-p16_8xb128_in1k.py
new file mode 100644
index 00000000..c40ed041
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/conformer/conformer-small-p16_8xb128_in1k.py
@@ -0,0 +1,9 @@
+_base_ = [
+ '../_base_/models/conformer/small-p16.py',
+ '../_base_/datasets/imagenet_bs64_swin_224.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_conformer.py',
+ '../_base_/default_runtime.py'
+]
+
+data = dict(samples_per_gpu=128)
+evaluation = dict(interval=1, metric='accuracy')
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/conformer/conformer-small-p32_8xb128_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/conformer/conformer-small-p32_8xb128_in1k.py
new file mode 100644
index 00000000..aaa11895
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/conformer/conformer-small-p32_8xb128_in1k.py
@@ -0,0 +1,9 @@
+_base_ = [
+ '../_base_/models/conformer/small-p32.py',
+ '../_base_/datasets/imagenet_bs64_swin_224.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_conformer.py',
+ '../_base_/default_runtime.py'
+]
+
+data = dict(samples_per_gpu=128)
+evaluation = dict(interval=1, metric='accuracy')
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/conformer/conformer-tiny-p16_8xb128_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/conformer/conformer-tiny-p16_8xb128_in1k.py
new file mode 100644
index 00000000..76a264c6
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/conformer/conformer-tiny-p16_8xb128_in1k.py
@@ -0,0 +1,9 @@
+_base_ = [
+ '../_base_/models/conformer/tiny-p16.py',
+ '../_base_/datasets/imagenet_bs64_swin_224.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_conformer.py',
+ '../_base_/default_runtime.py'
+]
+
+data = dict(samples_per_gpu=128)
+evaluation = dict(interval=1, metric='accuracy')
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/conformer/metafile.yml b/openmmlab_test/mmclassification-0.24.1/configs/conformer/metafile.yml
new file mode 100644
index 00000000..4efe05fb
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/conformer/metafile.yml
@@ -0,0 +1,78 @@
+Collections:
+ - Name: Conformer
+ Metadata:
+ Training Data: ImageNet-1k
+ Architecture:
+ - Layer Normalization
+ - Scaled Dot-Product Attention
+ - Dropout
+ Paper:
+ URL: https://arxiv.org/abs/2105.03889
+ Title: "Conformer: Local Features Coupling Global Representations for Visual Recognition"
+ README: configs/conformer/README.md
+ Code:
+ URL: https://github.com/open-mmlab/mmclassification/blob/v0.19.0/mmcls/models/backbones/conformer.py
+ Version: v0.19.0
+
+Models:
+ - Name: conformer-tiny-p16_3rdparty_8xb128_in1k
+ In Collection: Conformer
+ Config: configs/conformer/conformer-tiny-p16_8xb128_in1k.py
+ Metadata:
+ FLOPs: 4899611328
+ Parameters: 23524704
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 81.31
+ Top 5 Accuracy: 95.60
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/conformer/conformer-tiny-p16_3rdparty_8xb128_in1k_20211206-f6860372.pth
+ Converted From:
+ Weights: https://drive.google.com/file/d/19SxGhKcWOR5oQSxNUWUM2MGYiaWMrF1z/view?usp=sharing
+ Code: https://github.com/pengzhiliang/Conformer/blob/main/models.py#L65
+ - Name: conformer-small-p16_3rdparty_8xb128_in1k
+ In Collection: Conformer
+ Config: configs/conformer/conformer-small-p16_8xb128_in1k.py
+ Metadata:
+ FLOPs: 10311309312
+ Parameters: 37673424
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 83.32
+ Top 5 Accuracy: 96.46
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/conformer/conformer-small-p16_3rdparty_8xb128_in1k_20211206-3065dcf5.pth
+ Converted From:
+ Weights: https://drive.google.com/file/d/1mpOlbLaVxOfEwV4-ha78j_1Ebqzj2B83/view?usp=sharing
+ Code: https://github.com/pengzhiliang/Conformer/blob/main/models.py#L73
+ - Name: conformer-small-p32_8xb128_in1k
+ In Collection: Conformer
+ Config: configs/conformer/conformer-small-p32_8xb128_in1k.py
+ Metadata:
+ FLOPs: 7087281792
+ Parameters: 38853072
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 81.96
+ Top 5 Accuracy: 96.02
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/conformer/conformer-small-p32_8xb128_in1k_20211206-947a0816.pth
+ - Name: conformer-base-p16_3rdparty_8xb128_in1k
+ In Collection: Conformer
+ Config: configs/conformer/conformer-base-p16_8xb128_in1k.py
+ Metadata:
+ FLOPs: 22892078080
+ Parameters: 83289136
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 83.82
+ Top 5 Accuracy: 96.59
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/conformer/conformer-base-p16_3rdparty_8xb128_in1k_20211206-bfdf8637.pth
+ Converted From:
+ Weights: https://drive.google.com/file/d/1oeQ9LSOGKEUaYGu7WTlUGl3KDsQIi0MA/view?usp=sharing
+ Code: https://github.com/pengzhiliang/Conformer/blob/main/models.py#L89
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/convmixer/README.md b/openmmlab_test/mmclassification-0.24.1/configs/convmixer/README.md
new file mode 100644
index 00000000..763bad3c
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/convmixer/README.md
@@ -0,0 +1,42 @@
+# ConvMixer
+
+> [Patches Are All You Need?](https://arxiv.org/abs/2201.09792)
+
+
+
+## Abstract
+
+
+
+Although convolutional networks have been the dominant architecture for vision tasks for many years, recent experiments have shown that Transformer-based models, most notably the Vision Transformer (ViT), may exceed their performance in some settings. However, due to the quadratic runtime of the self-attention layers in Transformers, ViTs require the use of patch embeddings, which group together small regions of the image into single input features, in order to be applied to larger image sizes. This raises a question: Is the performance of ViTs due to the inherently-more-powerful Transformer architecture, or is it at least partly due to using patches as the input representation? In this paper, we present some evidence for the latter: specifically, we propose the ConvMixer, an extremely simple model that is similar in spirit to the ViT and the even-more-basic MLP-Mixer in that it operates directly on patches as input, separates the mixing of spatial and channel dimensions, and maintains equal size and resolution throughout the network. In contrast, however, the ConvMixer uses only standard convolutions to achieve the mixing steps. Despite its simplicity, we show that the ConvMixer outperforms the ViT, MLP-Mixer, and some of their variants for similar parameter counts and data set sizes, in addition to outperforming classical vision models such as the ResNet.
+
+
+
+
+
+
+
+## Results and models
+
+### ImageNet-1k
+
+| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+| :-----------------: | :-------: | :------: | :-------: | :-------: | :----------------------------------------------------------------------: | :------------------------------------------------------------------------: |
+| ConvMixer-768/32\* | 21.11 | 19.62 | 80.16 | 95.08 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/convmixer/convmixer-768-32_10xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convmixer/convmixer-768-32_3rdparty_10xb64_in1k_20220323-bca1f7b8.pth) |
+| ConvMixer-1024/20\* | 24.38 | 5.55 | 76.94 | 93.36 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/convmixer/convmixer-1024-20_10xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convmixer/convmixer-1024-20_3rdparty_10xb64_in1k_20220323-48f8aeba.pth) |
+| ConvMixer-1536/20\* | 51.63 | 48.71 | 81.37 | 95.61 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/convmixer/convmixer-1536-20_10xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convmixer/convmixer-1536_20_3rdparty_10xb64_in1k_20220323-ea5786f3.pth) |
+
+*Models marked with \* are converted from the [official repo](https://github.com/locuslab/convmixer). These config files are intended for inference only; we do not guarantee their training accuracy, and reproduction results are welcome as contributions.*
+
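+## How to use
+
+The parameter counts in the table can be reproduced from the config alone (a sketch, assuming `mmcls` 0.24.1 and its `mmcv` dependency are installed; no checkpoint is needed):
+
+```python
+from mmcv import Config
+
+from mmcls.models import build_classifier
+
+# Build ConvMixer-768/32 from its config and count the parameters.
+cfg = Config.fromfile('configs/convmixer/convmixer-768-32_10xb64_in1k.py')
+model = build_classifier(cfg.model)
+num_params = sum(p.numel() for p in model.parameters())
+print(f'{num_params / 1e6:.2f} M parameters')  # expected ~21.11 M
+```
+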
+## Citation
+
+```bibtex
+@misc{trockman2022patches,
+ title={Patches Are All You Need?},
+ author={Asher Trockman and J. Zico Kolter},
+ year={2022},
+ eprint={2201.09792},
+ archivePrefix={arXiv},
+ primaryClass={cs.CV}
+}
+```
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/convmixer/convmixer-1024-20_10xb64_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/convmixer/convmixer-1024-20_10xb64_in1k.py
new file mode 100644
index 00000000..58694d6e
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/convmixer/convmixer-1024-20_10xb64_in1k.py
@@ -0,0 +1,10 @@
+_base_ = [
+ '../_base_/models/convmixer/convmixer-1024-20.py',
+ '../_base_/datasets/imagenet_bs64_convmixer_224.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py',
+]
+
+optimizer = dict(lr=0.01)
+
+runner = dict(type='EpochBasedRunner', max_epochs=150)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/convmixer/convmixer-1536-20_10xb64_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/convmixer/convmixer-1536-20_10xb64_in1k.py
new file mode 100644
index 00000000..17a75595
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/convmixer/convmixer-1536-20_10xb64_in1k.py
@@ -0,0 +1,10 @@
+_base_ = [
+ '../_base_/models/convmixer/convmixer-1536-20.py',
+ '../_base_/datasets/imagenet_bs64_convmixer_224.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py',
+]
+
+optimizer = dict(lr=0.01)
+
+runner = dict(type='EpochBasedRunner', max_epochs=150)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/convmixer/convmixer-768-32_10xb64_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/convmixer/convmixer-768-32_10xb64_in1k.py
new file mode 100644
index 00000000..fa4c0602
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/convmixer/convmixer-768-32_10xb64_in1k.py
@@ -0,0 +1,10 @@
+_base_ = [
+ '../_base_/models/convmixer/convmixer-768-32.py',
+ '../_base_/datasets/imagenet_bs64_convmixer_224.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py',
+]
+
+optimizer = dict(lr=0.01)
+
+runner = dict(type='EpochBasedRunner', max_epochs=300)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/convmixer/metafile.yml b/openmmlab_test/mmclassification-0.24.1/configs/convmixer/metafile.yml
new file mode 100644
index 00000000..7831d746
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/convmixer/metafile.yml
@@ -0,0 +1,61 @@
+Collections:
+ - Name: ConvMixer
+ Metadata:
+ Training Data: ImageNet-1k
+ Architecture:
+ - 1x1 Convolution
+ - LayerScale
+ Paper:
+ URL: https://arxiv.org/abs/2201.09792
+ Title: Patches Are All You Need?
+ README: configs/convmixer/README.md
+
+Models:
+ - Name: convmixer-768-32_10xb64_in1k
+ Metadata:
+ FLOPs: 19623051264
+ Parameters: 21110248
+ In Collection: ConvMixer
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 80.16
+ Top 5 Accuracy: 95.08
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/convmixer/convmixer-768-32_3rdparty_10xb64_in1k_20220323-bca1f7b8.pth
+ Config: configs/convmixer/convmixer-768-32_10xb64_in1k.py
+ Converted From:
+ Weights: https://github.com/tmp-iclr/convmixer/releases/download/v1.0/convmixer_768_32_ks7_p7_relu.pth.tar
+ Code: https://github.com/locuslab/convmixer
+ - Name: convmixer-1024-20_10xb64_in1k
+ Metadata:
+ FLOPs: 5550112768
+ Parameters: 24383464
+ In Collections: ConvMixer
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 76.94
+ Top 5 Accuracy: 93.36
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/convmixer/convmixer-1024-20_3rdparty_10xb64_in1k_20220323-48f8aeba.pth
+ Config: configs/convmixer/convmixer-1024-20_10xb64_in1k.py
+ Converted From:
+ Weights: https://github.com/tmp-iclr/convmixer/releases/download/v1.0/convmixer_1024_20_ks9_p14.pth.tar
+ Code: https://github.com/locuslab/convmixer
+ - Name: convmixer-1536-20_10xb64_in1k
+ Metadata:
+ FLOPs: 48713170944
+ Parameters: 51625960
+ In Collections: ConvMixer
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 81.37
+ Top 5 Accuracy: 95.61
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/convmixer/convmixer-1536_20_3rdparty_10xb64_in1k_20220323-ea5786f3.pth
+ Config: configs/convmixer/convmixer-1536-20_10xb64_in1k.py
+ Converted From:
+ Weights: https://github.com/tmp-iclr/convmixer/releases/download/v1.0/convmixer_1536_20_ks9_p7.pth.tar
+ Code: https://github.com/locuslab/convmixer
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/convnext/README.md b/openmmlab_test/mmclassification-0.24.1/configs/convnext/README.md
new file mode 100644
index 00000000..7db81366
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/convnext/README.md
@@ -0,0 +1,59 @@
+# ConvNeXt
+
+> [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545v1)
+
+
+
+## Abstract
+
+
+
+The "Roaring 20s" of visual recognition began with the introduction of Vision Transformers (ViTs), which quickly superseded ConvNets as the state-of-the-art image classification model. A vanilla ViT, on the other hand, faces difficulties when applied to general computer vision tasks such as object detection and semantic segmentation. It is the hierarchical Transformers (e.g., Swin Transformers) that reintroduced several ConvNet priors, making Transformers practically viable as a generic vision backbone and demonstrating remarkable performance on a wide variety of vision tasks. However, the effectiveness of such hybrid approaches is still largely credited to the intrinsic superiority of Transformers, rather than the inherent inductive biases of convolutions. In this work, we reexamine the design spaces and test the limits of what a pure ConvNet can achieve. We gradually "modernize" a standard ResNet toward the design of a vision Transformer, and discover several key components that contribute to the performance difference along the way. The outcome of this exploration is a family of pure ConvNet models dubbed ConvNeXt. Constructed entirely from standard ConvNet modules, ConvNeXts compete favorably with Transformers in terms of accuracy and scalability, achieving 87.8% ImageNet top-1 accuracy and outperforming Swin Transformers on COCO detection and ADE20K segmentation, while maintaining the simplicity and efficiency of standard ConvNets.
+
+
+
+
+
+
+
+
+## Results and models
+
+### ImageNet-1k
+
+| Model | Pretrain | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+| :-----------: | :----------: | :-------: | :------: | :-------: | :-------: | :-------------------------------------------------------------------: | :---------------------------------------------------------------------: |
+| ConvNeXt-T\* | From scratch | 28.59 | 4.46 | 82.05 | 95.86 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/convnext/convnext-tiny_32xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-tiny_3rdparty_32xb128_in1k_20220124-18abde00.pth) |
+| ConvNeXt-S\* | From scratch | 50.22 | 8.69 | 83.13 | 96.44 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/convnext/convnext-small_32xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-small_3rdparty_32xb128_in1k_20220124-d39b5192.pth) |
+| ConvNeXt-B\* | From scratch | 88.59 | 15.36 | 83.85 | 96.74 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/convnext/convnext-base_32xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-base_3rdparty_32xb128_in1k_20220124-d0915162.pth) |
+| ConvNeXt-B\* | ImageNet-21k | 88.59 | 15.36 | 85.81 | 97.86 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/convnext/convnext-base_32xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-base_in21k-pre-3rdparty_32xb128_in1k_20220124-eb2d6ada.pth) |
+| ConvNeXt-L\* | From scratch | 197.77 | 34.37 | 84.30 | 96.89 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/convnext/convnext-large_64xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-large_3rdparty_64xb64_in1k_20220124-f8a0ded0.pth) |
+| ConvNeXt-L\* | ImageNet-21k | 197.77 | 34.37 | 86.61 | 98.04 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/convnext/convnext-large_64xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-large_in21k-pre-3rdparty_64xb64_in1k_20220124-2412403d.pth) |
+| ConvNeXt-XL\* | ImageNet-21k | 350.20 | 60.93 | 86.97 | 98.20 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/convnext/convnext-xlarge_64xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-xlarge_in21k-pre-3rdparty_64xb64_in1k_20220124-76b6863d.pth) |
+
+*Models with * are converted from the [official repo](https://github.com/facebookresearch/ConvNeXt). The config files of these models are only for inference. We don't guarantee the training accuracy of these config files, and we welcome you to contribute your reproduction results.*
+
+### Pre-trained Models
+
+The models pre-trained on ImageNet-1k or ImageNet-21k can be used to fine-tune on downstream tasks.
+
+| Model | Training Data | Params(M) | Flops(G) | Download |
+| :-----------: | :-----------: | :-------: | :------: | :-----------------------------------------------------------------------------------------------------------------------------------: |
+| ConvNeXt-T\* | ImageNet-1k | 28.59 | 4.46 | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-tiny_3rdparty_32xb128-noema_in1k_20220222-2908964a.pth) |
+| ConvNeXt-S\* | ImageNet-1k | 50.22 | 8.69 | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-small_3rdparty_32xb128-noema_in1k_20220222-fa001ca5.pth) |
+| ConvNeXt-B\* | ImageNet-1k | 88.59 | 15.36 | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-base_3rdparty_32xb128-noema_in1k_20220222-dba4f95f.pth) |
+| ConvNeXt-B\* | ImageNet-21k | 88.59 | 15.36 | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-base_3rdparty_in21k_20220124-13b83eec.pth) |
+| ConvNeXt-L\* | ImageNet-21k | 197.77 | 34.37 | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-large_3rdparty_in21k_20220124-41b5a79f.pth) |
+| ConvNeXt-XL\* | ImageNet-21k | 350.20 | 60.93 | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-xlarge_3rdparty_in21k_20220124-f909bad7.pth) |
+
+*Models with * are converted from the [official repo](https://github.com/facebookresearch/ConvNeXt).*
+
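+A hedged sketch of how one of these checkpoints can be used for fine-tuning: point the backbone's `init_cfg` at the downloaded weight. The target class count below is a placeholder, not part of any provided config:
+
+```python
+_base_ = ['./convnext-base_32xb128_in1k.py']
+
+# assumed: the ImageNet-21k ConvNeXt-B checkpoint from the table above
+checkpoint = 'https://download.openmmlab.com/mmclassification/v0/convnext/convnext-base_3rdparty_in21k_20220124-13b83eec.pth'  # noqa
+
+model = dict(
+    backbone=dict(
+        init_cfg=dict(
+            type='Pretrained', checkpoint=checkpoint, prefix='backbone')),
+    head=dict(num_classes=100),  # hypothetical 100-class target dataset
+)
+```
+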
+## Citation
+
+```bibtex
+@Article{liu2022convnet,
+ author = {Zhuang Liu and Hanzi Mao and Chao-Yuan Wu and Christoph Feichtenhofer and Trevor Darrell and Saining Xie},
+ title = {A ConvNet for the 2020s},
+ journal = {arXiv preprint arXiv:2201.03545},
+ year = {2022},
+}
+```
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/convnext/convnext-base_32xb128_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/convnext/convnext-base_32xb128_in1k.py
new file mode 100644
index 00000000..6c0450a4
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/convnext/convnext-base_32xb128_in1k.py
@@ -0,0 +1,12 @@
+_base_ = [
+ '../_base_/models/convnext/convnext-base.py',
+ '../_base_/datasets/imagenet_bs64_swin_224.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py',
+]
+
+data = dict(samples_per_gpu=128)
+
+optimizer = dict(lr=4e-3)
+
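+# Keep an exponential moving average of the model weights; 'ABOVE_NORMAL'
+# priority runs this hook before default-priority hooks such as checkpointing.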
+custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')]
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/convnext/convnext-large_64xb64_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/convnext/convnext-large_64xb64_in1k.py
new file mode 100644
index 00000000..1faae253
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/convnext/convnext-large_64xb64_in1k.py
@@ -0,0 +1,12 @@
+_base_ = [
+ '../_base_/models/convnext/convnext-large.py',
+ '../_base_/datasets/imagenet_bs64_swin_224.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py',
+]
+
+data = dict(samples_per_gpu=64)
+
+optimizer = dict(lr=4e-3)
+
+custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')]
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/convnext/convnext-small_32xb128_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/convnext/convnext-small_32xb128_in1k.py
new file mode 100644
index 00000000..d820fc6c
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/convnext/convnext-small_32xb128_in1k.py
@@ -0,0 +1,12 @@
+_base_ = [
+ '../_base_/models/convnext/convnext-small.py',
+ '../_base_/datasets/imagenet_bs64_swin_224.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py',
+]
+
+data = dict(samples_per_gpu=128)
+
+optimizer = dict(lr=4e-3)
+
+custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')]
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/convnext/convnext-tiny_32xb128_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/convnext/convnext-tiny_32xb128_in1k.py
new file mode 100644
index 00000000..46d0185d
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/convnext/convnext-tiny_32xb128_in1k.py
@@ -0,0 +1,12 @@
+_base_ = [
+ '../_base_/models/convnext/convnext-tiny.py',
+ '../_base_/datasets/imagenet_bs64_swin_224.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py',
+]
+
+data = dict(samples_per_gpu=128)
+
+optimizer = dict(lr=4e-3)
+
+custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')]
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/convnext/convnext-xlarge_64xb64_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/convnext/convnext-xlarge_64xb64_in1k.py
new file mode 100644
index 00000000..72849013
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/convnext/convnext-xlarge_64xb64_in1k.py
@@ -0,0 +1,12 @@
+_base_ = [
+ '../_base_/models/convnext/convnext-xlarge.py',
+ '../_base_/datasets/imagenet_bs64_swin_224.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py',
+]
+
+data = dict(samples_per_gpu=64)
+
+optimizer = dict(lr=4e-3)
+
+custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')]
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/convnext/metafile.yml b/openmmlab_test/mmclassification-0.24.1/configs/convnext/metafile.yml
new file mode 100644
index 00000000..823f3327
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/convnext/metafile.yml
@@ -0,0 +1,221 @@
+Collections:
+ - Name: ConvNeXt
+ Metadata:
+ Training Data: ImageNet-1k
+ Architecture:
+ - 1x1 Convolution
+ - LayerScale
+ Paper:
+ URL: https://arxiv.org/abs/2201.03545v1
+ Title: A ConvNet for the 2020s
+ README: configs/convnext/README.md
+ Code:
+ Version: v0.20.1
+ URL: https://github.com/open-mmlab/mmclassification/blob/v0.20.1/mmcls/models/backbones/convnext.py
+
+Models:
+ - Name: convnext-tiny_3rdparty_32xb128_in1k
+ Metadata:
+ FLOPs: 4457472768
+ Parameters: 28589128
+ In Collections: ConvNeXt
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 82.05
+ Top 5 Accuracy: 95.86
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-tiny_3rdparty_32xb128_in1k_20220124-18abde00.pth
+ Config: configs/convnext/convnext-tiny_32xb128_in1k.py
+ Converted From:
+ Weights: https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth
+ Code: https://github.com/facebookresearch/ConvNeXt
+ - Name: convnext-tiny_3rdparty_32xb128-noema_in1k
+ Metadata:
+ Training Data: ImageNet-1k
+ FLOPs: 4457472768
+ Parameters: 28589128
+ In Collections: ConvNeXt
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 81.81
+ Top 5 Accuracy: 95.67
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-tiny_3rdparty_32xb128-noema_in1k_20220222-2908964a.pth
+ Config: configs/convnext/convnext-tiny_32xb128_in1k.py
+ Converted From:
+ Weights: https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224.pth
+ Code: https://github.com/facebookresearch/ConvNeXt
+ - Name: convnext-small_3rdparty_32xb128_in1k
+ Metadata:
+ FLOPs: 8687008512
+ Parameters: 50223688
+ In Collections: ConvNeXt
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 83.13
+ Top 5 Accuracy: 96.44
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-small_3rdparty_32xb128_in1k_20220124-d39b5192.pth
+ Config: configs/convnext/convnext-small_32xb128_in1k.py
+ Converted From:
+ Weights: https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224_ema.pth
+ Code: https://github.com/facebookresearch/ConvNeXt
+ - Name: convnext-small_3rdparty_32xb128-noema_in1k
+ Metadata:
+ Training Data: ImageNet-1k
+ FLOPs: 8687008512
+ Parameters: 50223688
+ In Collections: ConvNeXt
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 83.11
+ Top 5 Accuracy: 96.34
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-small_3rdparty_32xb128-noema_in1k_20220222-fa001ca5.pth
+ Config: configs/convnext/convnext-small_32xb128_in1k.py
+ Converted From:
+ Weights: https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224.pth
+ Code: https://github.com/facebookresearch/ConvNeXt
+ - Name: convnext-base_3rdparty_32xb128_in1k
+ Metadata:
+ FLOPs: 15359124480
+ Parameters: 88591464
+ In Collections: ConvNeXt
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 83.85
+ Top 5 Accuracy: 96.74
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-base_3rdparty_32xb128_in1k_20220124-d0915162.pth
+ Config: configs/convnext/convnext-base_32xb128_in1k.py
+ Converted From:
+ Weights: https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_224_ema.pth
+ Code: https://github.com/facebookresearch/ConvNeXt
+ - Name: convnext-base_3rdparty_32xb128-noema_in1k
+ Metadata:
+ Training Data: ImageNet-1k
+ FLOPs: 15359124480
+ Parameters: 88591464
+ In Collections: ConvNeXt
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 83.71
+ Top 5 Accuracy: 96.60
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-base_3rdparty_32xb128-noema_in1k_20220222-dba4f95f.pth
+ Config: configs/convnext/convnext-base_32xb128_in1k.py
+ Converted From:
+ Weights: https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_224.pth
+ Code: https://github.com/facebookresearch/ConvNeXt
+ - Name: convnext-base_3rdparty_in21k
+ Metadata:
+ Training Data: ImageNet-21k
+ FLOPs: 15359124480
+ Parameters: 88591464
+ In Collections: ConvNeXt
+ Results: null
+ Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-base_3rdparty_in21k_20220124-13b83eec.pth
+ Converted From:
+ Weights: https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_224.pth
+ Code: https://github.com/facebookresearch/ConvNeXt
+ - Name: convnext-base_in21k-pre-3rdparty_32xb128_in1k
+ Metadata:
+ Training Data:
+ - ImageNet-21k
+ - ImageNet-1k
+ FLOPs: 15359124480
+ Parameters: 88591464
+ In Collections: ConvNeXt
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 85.81
+ Top 5 Accuracy: 97.86
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-base_in21k-pre-3rdparty_32xb128_in1k_20220124-eb2d6ada.pth
+ Config: configs/convnext/convnext-base_32xb128_in1k.py
+ Converted From:
+ Weights: https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_224.pth
+ Code: https://github.com/facebookresearch/ConvNeXt
+ - Name: convnext-large_3rdparty_64xb64_in1k
+ Metadata:
+ FLOPs: 34368026112
+ Parameters: 197767336
+ In Collections: ConvNeXt
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 84.30
+ Top 5 Accuracy: 96.89
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-large_3rdparty_64xb64_in1k_20220124-f8a0ded0.pth
+ Config: configs/convnext/convnext-large_64xb64_in1k.py
+ Converted From:
+ Weights: https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_224_ema.pth
+ Code: https://github.com/facebookresearch/ConvNeXt
+ - Name: convnext-large_3rdparty_in21k
+ Metadata:
+ Training Data: ImageNet-21k
+ FLOPs: 34368026112
+ Parameters: 197767336
+ In Collections: ConvNeXt
+ Results: null
+ Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-large_3rdparty_in21k_20220124-41b5a79f.pth
+ Converted From:
+ Weights: https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_224.pth
+ Code: https://github.com/facebookresearch/ConvNeXt
+ - Name: convnext-large_in21k-pre-3rdparty_64xb64_in1k
+ Metadata:
+ Training Data:
+ - ImageNet-21k
+ - ImageNet-1k
+ FLOPs: 34368026112
+ Parameters: 197767336
+ In Collections: ConvNeXt
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 86.61
+ Top 5 Accuracy: 98.04
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-large_in21k-pre-3rdparty_64xb64_in1k_20220124-2412403d.pth
+ Config: configs/convnext/convnext-large_64xb64_in1k.py
+ Converted From:
+ Weights: https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_224.pth
+ Code: https://github.com/facebookresearch/ConvNeXt
+ - Name: convnext-xlarge_3rdparty_in21k
+ Metadata:
+ Training Data: ImageNet-21k
+ FLOPs: 60929820672
+ Parameters: 350196968
+ In Collections: ConvNeXt
+ Results: null
+ Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-xlarge_3rdparty_in21k_20220124-f909bad7.pth
+ Converted From:
+ Weights: https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_224.pth
+ Code: https://github.com/facebookresearch/ConvNeXt
+ - Name: convnext-xlarge_in21k-pre-3rdparty_64xb64_in1k
+ Metadata:
+ Training Data:
+ - ImageNet-21k
+ - ImageNet-1k
+ FLOPs: 60929820672
+ Parameters: 350196968
+ In Collections: ConvNeXt
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 86.97
+ Top 5 Accuracy: 98.20
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-xlarge_in21k-pre-3rdparty_64xb64_in1k_20220124-76b6863d.pth
+ Config: configs/convnext/convnext-xlarge_64xb64_in1k.py
+ Converted From:
+ Weights: https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_224_ema.pth
+ Code: https://github.com/facebookresearch/ConvNeXt
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/cspnet/README.md b/openmmlab_test/mmclassification-0.24.1/configs/cspnet/README.md
new file mode 100644
index 00000000..10eb9d0d
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/cspnet/README.md
@@ -0,0 +1,41 @@
+# CSPNet
+
+> [CSPNet: A New Backbone that can Enhance Learning Capability of CNN](https://arxiv.org/abs/1911.11929)
+
+
+
+## Abstract
+
+
+
+Neural networks have enabled state-of-the-art approaches to achieve incredible results on computer vision tasks such as object detection. However, such success greatly relies on costly computation resources, which hinders people with cheap devices from appreciating the advanced technology. In this paper, we propose Cross Stage Partial Network (CSPNet) to mitigate the problem that previous works require heavy inference computations from the network architecture perspective. We attribute the problem to the duplicate gradient information within network optimization. The proposed networks respect the variability of the gradients by integrating feature maps from the beginning and the end of a network stage, which, in our experiments, reduces computations by 20% with equivalent or even superior accuracy on the ImageNet dataset, and significantly outperforms state-of-the-art approaches in terms of AP50 on the MS COCO object detection dataset. The CSPNet is easy to implement and general enough to cope with architectures based on ResNet, ResNeXt, and DenseNet. Source code is at this https URL.
+
+
+
+
+
+
+
+
+## Results and models
+
+### ImageNet-1k
+
+| Model | Pretrain | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+| :------------: | :----------: | :-------: | :------: | :-------: | :-------: | :------------------------------------------------------------------: | :---------------------------------------------------------------------: |
+| CSPDarkNet50\* | From scratch | 27.64 | 5.04 | 80.05 | 95.07 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/cspnet/cspdarknet50_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/cspnet/cspdarknet50_3rdparty_8xb32_in1k_20220329-bd275287.pth) |
+| CSPResNet50\* | From scratch | 21.62 | 3.48 | 79.55 | 94.68 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/cspnet/cspresnet50_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/cspnet/cspresnet50_3rdparty_8xb32_in1k_20220329-dd6dddfb.pth) |
+| CSPResNeXt50\* | From scratch | 20.57 | 3.11 | 79.96 | 94.96 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/cspnet/cspresnext50_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/cspnet/cspresnext50_3rdparty_8xb32_in1k_20220329-2cc84d21.pth) |
+
+*Models with * are converted from the [timm repo](https://github.com/rwightman/pytorch-image-models). The config files of these models are only for inference. We don't guarantee the training accuracy of these config files, and we welcome you to contribute your reproduction results.*
+
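+As a quick sanity check, the backbones can be built directly from the registry (a sketch under assumed defaults; the final stage width matches the `in_channels` of the classification head in the config):
+
+```python
+import torch
+from mmcls.models import build_backbone
+
+# same backbone dict as in cspdarknet50_8xb32_in1k.py
+backbone = build_backbone(dict(type='CSPDarkNet', depth=53))
+backbone.eval()
+with torch.no_grad():
+    feats = backbone(torch.randn(1, 3, 224, 224))
+print([f.shape for f in feats])  # last feature map should have 1024 channels
+```
+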
+## Citation
+
+```bibtex
+@inproceedings{wang2020cspnet,
+ title={CSPNet: A new backbone that can enhance learning capability of CNN},
+ author={Wang, Chien-Yao and Liao, Hong-Yuan Mark and Wu, Yueh-Hua and Chen, Ping-Yang and Hsieh, Jun-Wei and Yeh, I-Hau},
+ booktitle={Proceedings of the IEEE/CVF conference on computer vision and pattern recognition workshops},
+ pages={390--391},
+ year={2020}
+}
+```
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/cspnet/cspdarknet50_8xb32_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/cspnet/cspdarknet50_8xb32_in1k.py
new file mode 100644
index 00000000..cf2ce731
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/cspnet/cspdarknet50_8xb32_in1k.py
@@ -0,0 +1,65 @@
+_base_ = [
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
+
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='CSPDarkNet', depth=53),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=1024,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
+
+# dataset settings
+dataset_type = 'ImageNet'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandomResizedCrop',
+ size=224,
+ backend='pillow',
+ interpolation='bicubic'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='Resize',
+ size=(288, -1),
+ backend='pillow',
+ interpolation='bicubic'),
+ dict(type='CenterCrop', crop_size=256),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=32,
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/train',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ # replace `data/val` with `data/test` for standard test
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline))
+evaluation = dict(interval=1, metric='accuracy')
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/cspnet/cspresnet50_8xb32_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/cspnet/cspresnet50_8xb32_in1k.py
new file mode 100644
index 00000000..f4cfbf8a
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/cspnet/cspresnet50_8xb32_in1k.py
@@ -0,0 +1,66 @@
+_base_ = [
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
+
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='CSPResNet', depth=50),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=1024,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
+
+# dataset settings
+dataset_type = 'ImageNet'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandomResizedCrop',
+ size=224,
+ backend='pillow',
+ interpolation='bicubic'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='Resize',
+ size=(288, -1),
+ backend='pillow',
+ interpolation='bicubic'),
+ dict(type='CenterCrop', crop_size=256),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=32,
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/train',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ # replace `data/val` with `data/test` for standard test
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline))
+evaluation = dict(interval=1, metric='accuracy')
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/cspnet/cspresnext50_8xb32_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/cspnet/cspresnext50_8xb32_in1k.py
new file mode 100644
index 00000000..a82ab751
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/cspnet/cspresnext50_8xb32_in1k.py
@@ -0,0 +1,65 @@
+_base_ = [
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
+
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='CSPResNeXt', depth=50),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2048,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
+
+# dataset settings
+dataset_type = 'ImageNet'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandomResizedCrop',
+ size=224,
+ backend='pillow',
+ interpolation='bicubic'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='Resize',
+ size=(256, -1),
+ backend='pillow',
+ interpolation='bicubic'),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=32,
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/train',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ # replace `data/val` with `data/test` for standard test
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline))
+evaluation = dict(interval=1, metric='accuracy')
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/cspnet/metafile.yml b/openmmlab_test/mmclassification-0.24.1/configs/cspnet/metafile.yml
new file mode 100644
index 00000000..8c4a78ed
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/cspnet/metafile.yml
@@ -0,0 +1,64 @@
+Collections:
+ - Name: CSPNet
+ Metadata:
+ Training Data: ImageNet-1k
+ Architecture:
+ - Cross Stage Partial Stage
+ Paper:
+ URL: https://arxiv.org/abs/1911.11929
+ Title: 'CSPNet: A New Backbone that can Enhance Learning Capability of CNN'
+ README: configs/cspnet/README.md
+ Code:
+ Version: v0.22.0
+ URL: https://github.com/open-mmlab/mmclassification/blob/v0.22.0/mmcls/models/backbones/cspnet.py
+
+Models:
+ - Name: cspdarknet50_3rdparty_8xb32_in1k
+ Metadata:
+ FLOPs: 5040000000
+ Parameters: 27640000
+ In Collections: CSPNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 80.05
+ Top 5 Accuracy: 95.07
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/cspnet/cspdarknet50_3rdparty_8xb32_in1k_20220329-bd275287.pth
+ Config: configs/cspnet/cspdarknet50_8xb32_in1k.py
+ Converted From:
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspdarknet53_ra_256-d05c7c21.pth
+ Code: https://github.com/rwightman/pytorch-image-models
+ - Name: cspresnet50_3rdparty_8xb32_in1k
+ Metadata:
+ Training Data: ImageNet-1k
+ FLOPs: 3480000000
+ Parameters: 21620000
+ In Collections: CSPNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 79.55
+ Top 5 Accuracy: 94.68
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/cspnet/cspresnet50_3rdparty_8xb32_in1k_20220329-dd6dddfb.pth
+ Config: configs/cspnet/cspresnet50_8xb32_in1k.py
+ Converted From:
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspresnet50_ra-d3e8d487.pth
+ Code: https://github.com/rwightman/pytorch-image-models
+ - Name: cspresnext50_3rdparty_8xb32_in1k
+ Metadata:
+ FLOPs: 3110000000
+ Parameters: 20570000
+ In Collections: CSPNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 79.96
+ Top 5 Accuracy: 94.96
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/cspnet/cspresnext50_3rdparty_8xb32_in1k_20220329-2cc84d21.pth
+ Config: configs/cspnet/cspresnext50_8xb32_in1k.py
+ Converted From:
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspresnext50_ra_224-648b4713.pth
+ Code: https://github.com/rwightman/pytorch-image-models
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/csra/README.md b/openmmlab_test/mmclassification-0.24.1/configs/csra/README.md
new file mode 100644
index 00000000..fa677cfc
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/csra/README.md
@@ -0,0 +1,36 @@
+# CSRA
+
+> [Residual Attention: A Simple but Effective Method for Multi-Label Recognition](https://arxiv.org/abs/2108.02456)
+
+
+
+## Abstract
+
+Multi-label image recognition is a challenging computer vision task of practical use. Progresses in this area, however, are often characterized by complicated methods, heavy computations, and lack of intuitive explanations. To effectively capture different spatial regions occupied by objects from different categories, we propose an embarrassingly simple module, named class-specific residual attention (CSRA). CSRA generates class-specific features for every category by proposing a simple spatial attention score, and then combines it with the class-agnostic average pooling feature. CSRA achieves state-of-the-art results on multilabel recognition, and at the same time is much simpler than them. Furthermore, with only 4 lines of code, CSRA also leads to consistent improvement across many diverse pretrained models and datasets without any extra training. CSRA is both easy to implement and light in computations, which also enjoys intuitive explanations and visualizations.
+
+
+
+
+
+
+## Results and models
+
+### VOC2007
+
+| Model | Pretrain | Params(M) | Flops(G) | mAP | OF1 (%) | CF1 (%) | Config | Download |
+| :------------: | :------------------------------------------------: | :-------: | :------: | :---: | :-----: | :-----: | :-----------------------------------------------: | :-------------------------------------------------: |
+| Resnet101-CSRA | [ImageNet-1k](https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_8xb32_in1k_20210831-539c63f8.pth) | 23.55 | 4.12 | 94.98 | 90.80 | 89.16 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/csra/resnet101-csra_1xb16_voc07-448px.py) | [model](https://download.openmmlab.com/mmclassification/v0/csra/resnet101-csra_1xb16_voc07-448px_20220722-29efb40a.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/csra/resnet101-csra_1xb16_voc07-448px_20220722-29efb40a.log.json) |
+
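+The core of CSRA is a small drop-in pooling head. A minimal single-head sketch of the idea (a simplification for illustration, not the mmcls `CSRAClsHead` code):
+
+```python
+import torch.nn as nn
+import torch.nn.functional as F
+
+class SimpleCSRA(nn.Module):
+    """Single-head class-specific residual attention (illustrative)."""
+
+    def __init__(self, in_channels, num_classes, lam=0.1, T=1.0):
+        super().__init__()
+        self.lam, self.T = lam, T
+        self.fc = nn.Conv2d(in_channels, num_classes, 1, bias=False)
+
+    def forward(self, x):                  # x: (N, C, H, W)
+        score = self.fc(x).flatten(2)      # per-location class logits (N, K, H*W)
+        base = score.mean(dim=2)           # class-agnostic average pooling
+        att = (F.softmax(score * self.T, dim=2) * score).sum(dim=2)
+        return base + self.lam * att       # residual combination
+```
+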
+## Citation
+
+```bibtex
+@misc{https://doi.org/10.48550/arxiv.2108.02456,
+ doi = {10.48550/ARXIV.2108.02456},
+ url = {https://arxiv.org/abs/2108.02456},
+ author = {Zhu, Ke and Wu, Jianxin},
+ keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences},
+ title = {Residual Attention: A Simple but Effective Method for Multi-Label Recognition},
+ publisher = {arXiv},
+ year = {2021},
+ copyright = {arXiv.org perpetual, non-exclusive license}
+}
+```
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/csra/metafile.yml b/openmmlab_test/mmclassification-0.24.1/configs/csra/metafile.yml
new file mode 100644
index 00000000..f1fa6228
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/csra/metafile.yml
@@ -0,0 +1,29 @@
+Collections:
+ - Name: CSRA
+ Metadata:
+ Training Data: PASCAL VOC 2007
+ Architecture:
+ - Class-specific Residual Attention
+ Paper:
+ URL: https://arxiv.org/abs/2108.02456
+ Title: 'Residual Attention: A Simple but Effective Method for Multi-Label Recognition'
+ README: configs/csra/README.md
+ Code:
+ Version: v0.24.0
+ URL: https://github.com/open-mmlab/mmclassification/blob/v0.24.0/mmcls/models/heads/multi_label_csra_head.py
+
+Models:
+ - Name: resnet101-csra_1xb16_voc07-448px
+ Metadata:
+ FLOPs: 4120000000
+ Parameters: 23550000
+ In Collections: CSRA
+ Results:
+ - Dataset: PASCAL VOC 2007
+ Metrics:
+ mAP: 94.98
+ OF1: 90.80
+ CF1: 89.16
+ Task: Multi-Label Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/csra/resnet101-csra_1xb16_voc07-448px_20220722-29efb40a.pth
+ Config: configs/csra/resnet101-csra_1xb16_voc07-448px.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/csra/resnet101-csra_1xb16_voc07-448px.py b/openmmlab_test/mmclassification-0.24.1/configs/csra/resnet101-csra_1xb16_voc07-448px.py
new file mode 100644
index 00000000..5dc5dd62
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/csra/resnet101-csra_1xb16_voc07-448px.py
@@ -0,0 +1,75 @@
+_base_ = ['../_base_/datasets/voc_bs16.py', '../_base_/default_runtime.py']
+
+# Pre-trained Checkpoint Path
+checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_8xb32_in1k_20210831-539c63f8.pth' # noqa
+# If you want to use the pre-trained ResNet101-CutMix weight from the
+# original repo (https://github.com/Kevinz-code/CSRA), the script
+# 'tools/convert_models/torchvision_to_mmcls.py' can help you convert it
+# into mmcls format. The mAP would reach 95.5 with that weight.
+# checkpoint = 'PATH/TO/PRE-TRAINED_WEIGHT'
+
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='ResNet',
+ depth=101,
+ num_stages=4,
+ out_indices=(3, ),
+ style='pytorch',
+ init_cfg=dict(
+ type='Pretrained', checkpoint=checkpoint, prefix='backbone')),
+ neck=None,
+ head=dict(
+ type='CSRAClsHead',
+ num_classes=20,
+ in_channels=2048,
+ num_heads=1,
+ lam=0.1,
+ loss=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)))
+
+# dataset setting
+img_norm_cfg = dict(mean=[0, 0, 0], std=[255, 255, 255], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='RandomResizedCrop', size=448, scale=(0.7, 1.0)),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='Resize', size=448),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ # treat the difficult examples as negative ones (0)
+ train=dict(pipeline=train_pipeline, difficult_as_postive=False),
+ val=dict(pipeline=test_pipeline),
+ test=dict(pipeline=test_pipeline))
+
+# optimizer
+# The lr of the classifier head is 10 * base_lr, which helps convergence.
+optimizer = dict(
+ type='SGD',
+ lr=0.0002,
+ momentum=0.9,
+ weight_decay=0.0001,
+ paramwise_cfg=dict(custom_keys={'head': dict(lr_mult=10)}))
+
+optimizer_config = dict(grad_clip=None)
+
+# learning policy
+lr_config = dict(
+ policy='step',
+ step=6,
+ gamma=0.1,
+ warmup='linear',
+ warmup_iters=1,
+ warmup_ratio=1e-7,
+ warmup_by_epoch=True)
+runner = dict(type='EpochBasedRunner', max_epochs=20)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/deit/README.md b/openmmlab_test/mmclassification-0.24.1/configs/deit/README.md
new file mode 100644
index 00000000..e3103658
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/deit/README.md
@@ -0,0 +1,52 @@
+# DeiT
+
+> [Training data-efficient image transformers & distillation through attention](https://arxiv.org/abs/2012.12877)
+
+
+
+## Abstract
+
+Recently, neural networks purely based on attention were shown to address image understanding tasks such as image classification. However, these visual transformers are pre-trained with hundreds of millions of images using an expensive infrastructure, thereby limiting their adoption. In this work, we produce a competitive convolution-free transformer by training on Imagenet only. We train them on a single computer in less than 3 days. Our reference vision transformer (86M parameters) achieves top-1 accuracy of 83.1% (single-crop evaluation) on ImageNet with no external data. More importantly, we introduce a teacher-student strategy specific to transformers. It relies on a distillation token ensuring that the student learns from the teacher through attention. We show the interest of this token-based distillation, especially when using a convnet as a teacher. This leads us to report results competitive with convnets for both Imagenet (where we obtain up to 85.2% accuracy) and when transferring to other tasks. We share our code and models.
+
+
+
+
+
+
+## Results and models
+
+### ImageNet-1k
+
+The teacher of the distilled DeiT models is RegNetY-16GF.
+
+| Model | Pretrain | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+| :-------------------------: | :----------: | :-------: | :------: | :-------: | :-------: | :------------------------------------------------------------: | :--------------------------------------------------------------: |
+| DeiT-tiny | From scratch | 5.72 | 1.08 | 74.50 | 92.24 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/deit/deit-tiny_pt-4xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit/deit-tiny_pt-4xb256_in1k_20220218-13b382a0.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/deit/deit-tiny_pt-4xb256_in1k_20220218-13b382a0.log.json) |
+| DeiT-tiny distilled\* | From scratch | 5.72 | 1.08 | 74.51 | 91.90 | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/deit/deit-tiny-distilled_pt-4xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit/deit-tiny-distilled_3rdparty_pt-4xb256_in1k_20211216-c429839a.pth) |
+| DeiT-small | From scratch | 22.05 | 4.24 | 80.69 | 95.06 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/deit/deit-small_pt-4xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit/deit-small_pt-4xb256_in1k_20220218-9425b9bb.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/deit/deit-small_pt-4xb256_in1k_20220218-9425b9bb.log.json) |
+| DeiT-small distilled\* | From scratch | 22.05 | 4.24 | 81.17 | 95.40 | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/deit/deit-small-distilled_pt-4xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit/deit-small-distilled_3rdparty_pt-4xb256_in1k_20211216-4de1d725.pth) |
+| DeiT-base | From scratch | 86.57 | 16.86 | 81.76 | 95.81 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/deit/deit-base_pt-16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit/deit-base_pt-16xb64_in1k_20220216-db63c16c.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/deit/deit-base_pt-16xb64_in1k_20220216-db63c16c.log.json) |
+| DeiT-base\* | From scratch | 86.57 | 16.86 | 81.79 | 95.59 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/deit/deit-base_pt-16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit/deit-base_3rdparty_pt-16xb64_in1k_20211124-6f40c188.pth) |
+| DeiT-base distilled\* | From scratch | 86.57 | 16.86 | 83.33 | 96.49 | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/deit/deit-base-distilled_pt-16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit/deit-base-distilled_3rdparty_pt-16xb64_in1k_20211216-42891296.pth) |
+| DeiT-base 384px\* | ImageNet-1k | 86.86 | 49.37 | 83.04 | 96.31 | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/deit/deit-base_ft-16xb32_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit/deit-base_3rdparty_ft-16xb32_in1k-384px_20211124-822d02f2.pth) |
+| DeiT-base distilled 384px\* | ImageNet-1k | 86.86 | 49.37 | 85.55 | 97.35 | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/deit/deit-base-distilled_ft-16xb32_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit/deit-base-distilled_3rdparty_ft-16xb32_in1k-384px_20211216-e48d6000.pth) |
+
+*Models with * are converted from the [official repo](https://github.com/facebookresearch/deit). The config files of these models are only for validation. We don't guarantee the training accuracy of these config files, and we welcome you to contribute your reproduction results.*
+
+```{warning}
+MMClassification doesn't support training the distilled versions of DeiT,
+and the distilled checkpoints are provided for inference only.
+```
+
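+For reference, the distilled variants attach a second, distillation token to the backbone; at inference time the two class predictions are simply averaged. A hedged sketch of that scheme (not the exact `DeiTClsHead` implementation):
+
+```python
+import torch.nn as nn
+
+class TwoTokenHead(nn.Module):
+    """Average the class-token and distillation-token predictions."""
+
+    def __init__(self, embed_dim, num_classes):
+        super().__init__()
+        self.head_cls = nn.Linear(embed_dim, num_classes)
+        self.head_dist = nn.Linear(embed_dim, num_classes)
+
+    def forward(self, cls_token, dist_token):
+        return (self.head_cls(cls_token) + self.head_dist(dist_token)) / 2
+```
+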
+## Citation
+
+```bibtex
+@InProceedings{pmlr-v139-touvron21a,
+ title = {Training data-efficient image transformers & distillation through attention},
+ author = {Touvron, Hugo and Cord, Matthieu and Douze, Matthijs and Massa, Francisco and Sablayrolles, Alexandre and Jegou, Herve},
+ booktitle = {International Conference on Machine Learning},
+ pages = {10347--10357},
+ year = {2021},
+ volume = {139},
+ month = {July}
+}
+```
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/deit/deit-base-distilled_ft-16xb32_in1k-384px.py b/openmmlab_test/mmclassification-0.24.1/configs/deit/deit-base-distilled_ft-16xb32_in1k-384px.py
new file mode 100644
index 00000000..c8bdfb53
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/deit/deit-base-distilled_ft-16xb32_in1k-384px.py
@@ -0,0 +1,9 @@
+_base_ = './deit-base_ft-16xb32_in1k-384px.py'
+
+# model settings
+model = dict(
+ backbone=dict(type='DistilledVisionTransformer'),
+ head=dict(type='DeiTClsHead'),
+ # Change to the path of the pretrained model
+ # init_cfg=dict(type='Pretrained', checkpoint=''),
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/deit/deit-base-distilled_pt-16xb64_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/deit/deit-base-distilled_pt-16xb64_in1k.py
new file mode 100644
index 00000000..67165838
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/deit/deit-base-distilled_pt-16xb64_in1k.py
@@ -0,0 +1,10 @@
+_base_ = './deit-small_pt-4xb256_in1k.py'
+
+# model settings
+model = dict(
+ backbone=dict(type='DistilledVisionTransformer', arch='deit-base'),
+ head=dict(type='DeiTClsHead', in_channels=768),
+)
+
+# data settings
+data = dict(samples_per_gpu=64, workers_per_gpu=5)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/deit/deit-base_ft-16xb32_in1k-384px.py b/openmmlab_test/mmclassification-0.24.1/configs/deit/deit-base_ft-16xb32_in1k-384px.py
new file mode 100644
index 00000000..db444168
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/deit/deit-base_ft-16xb32_in1k-384px.py
@@ -0,0 +1,29 @@
+_base_ = [
+ '../_base_/datasets/imagenet_bs64_swin_384.py',
+ '../_base_/schedules/imagenet_bs4096_AdamW.py',
+ '../_base_/default_runtime.py'
+]
+
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='VisionTransformer',
+ arch='deit-base',
+ img_size=384,
+ patch_size=16,
+ ),
+ neck=None,
+ head=dict(
+ type='VisionTransformerClsHead',
+ num_classes=1000,
+ in_channels=768,
+ loss=dict(
+ type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
+ ),
+ # Change to the path of the pretrained model
+ # init_cfg=dict(type='Pretrained', checkpoint=''),
+)
+
+# data settings
+data = dict(samples_per_gpu=32, workers_per_gpu=5)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/deit/deit-base_pt-16xb64_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/deit/deit-base_pt-16xb64_in1k.py
new file mode 100644
index 00000000..24c13dca
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/deit/deit-base_pt-16xb64_in1k.py
@@ -0,0 +1,13 @@
+_base_ = './deit-small_pt-4xb256_in1k.py'
+
+# model settings
+model = dict(
+ backbone=dict(
+ type='VisionTransformer', arch='deit-base', drop_path_rate=0.1),
+ head=dict(type='VisionTransformerClsHead', in_channels=768),
+)
+
+# data settings
+data = dict(samples_per_gpu=64, workers_per_gpu=5)
+
+custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')]
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/deit/deit-small-distilled_pt-4xb256_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/deit/deit-small-distilled_pt-4xb256_in1k.py
new file mode 100644
index 00000000..3b1fac22
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/deit/deit-small-distilled_pt-4xb256_in1k.py
@@ -0,0 +1,7 @@
+_base_ = './deit-small_pt-4xb256_in1k.py'
+
+# model settings
+model = dict(
+ backbone=dict(type='DistilledVisionTransformer', arch='deit-small'),
+ head=dict(type='DeiTClsHead', in_channels=384),
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/deit/deit-small_pt-4xb256_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/deit/deit-small_pt-4xb256_in1k.py
new file mode 100644
index 00000000..550f0801
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/deit/deit-small_pt-4xb256_in1k.py
@@ -0,0 +1,44 @@
+# Compared with the original config, drop path and the EMA hook are removed
+# for the small and tiny archs.
+_base_ = [
+ '../_base_/datasets/imagenet_bs64_swin_224.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py'
+]
+
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='VisionTransformer',
+ arch='deit-small',
+ img_size=224,
+ patch_size=16),
+ neck=None,
+ head=dict(
+ type='VisionTransformerClsHead',
+ num_classes=1000,
+ in_channels=384,
+ loss=dict(
+ type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
+ ),
+ init_cfg=[
+ dict(type='TruncNormal', layer='Linear', std=.02),
+ dict(type='Constant', layer='LayerNorm', val=1., bias=0.),
+ ],
+ train_cfg=dict(augments=[
+ dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5),
+ dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
+ ]))
+
+# data settings
+data = dict(samples_per_gpu=256, workers_per_gpu=5)
+
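+# Exempt norm layers, biases, and the class/position tokens from weight decay.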
+paramwise_cfg = dict(
+ norm_decay_mult=0.0,
+ bias_decay_mult=0.0,
+ custom_keys={
+ '.cls_token': dict(decay_mult=0.0),
+ '.pos_embed': dict(decay_mult=0.0)
+ })
+optimizer = dict(paramwise_cfg=paramwise_cfg)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/deit/deit-tiny-distilled_pt-4xb256_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/deit/deit-tiny-distilled_pt-4xb256_in1k.py
new file mode 100644
index 00000000..175f9804
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/deit/deit-tiny-distilled_pt-4xb256_in1k.py
@@ -0,0 +1,7 @@
+_base_ = './deit-small_pt-4xb256_in1k.py'
+
+# model settings
+model = dict(
+ backbone=dict(type='DistilledVisionTransformer', arch='deit-tiny'),
+ head=dict(type='DeiTClsHead', in_channels=192),
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/deit/deit-tiny_pt-4xb256_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/deit/deit-tiny_pt-4xb256_in1k.py
new file mode 100644
index 00000000..43df6e13
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/deit/deit-tiny_pt-4xb256_in1k.py
@@ -0,0 +1,7 @@
+_base_ = './deit-small_pt-4xb256_in1k.py'
+
+# model settings
+model = dict(
+ backbone=dict(type='VisionTransformer', arch='deit-tiny'),
+ head=dict(type='VisionTransformerClsHead', in_channels=192),
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/deit/metafile.yml b/openmmlab_test/mmclassification-0.24.1/configs/deit/metafile.yml
new file mode 100644
index 00000000..ddd4c674
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/deit/metafile.yml
@@ -0,0 +1,153 @@
+Collections:
+ - Name: DeiT
+ Metadata:
+ Training Data: ImageNet-1k
+ Architecture:
+ - Layer Normalization
+ - Scaled Dot-Product Attention
+ - Attention Dropout
+ - Multi-Head Attention
+ Paper:
+ URL: https://arxiv.org/abs/2012.12877
+ Title: "Training data-efficient image transformers & distillation through attention"
+ README: configs/deit/README.md
+ Code:
+ Version: v0.19.0
+ URL: https://github.com/open-mmlab/mmclassification/blob/v0.19.0/mmcls/models/backbones/deit.py
+
+Models:
+ - Name: deit-tiny_pt-4xb256_in1k
+ Metadata:
+ FLOPs: 1080000000
+ Parameters: 5720000
+ In Collection: DeiT
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 74.50
+ Top 5 Accuracy: 92.24
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/deit/deit-tiny_pt-4xb256_in1k_20220218-13b382a0.pth
+ Config: configs/deit/deit-tiny_pt-4xb256_in1k.py
+ - Name: deit-tiny-distilled_3rdparty_pt-4xb256_in1k
+ Metadata:
+ FLOPs: 1080000000
+ Parameters: 5720000
+ In Collection: DeiT
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 74.51
+ Top 5 Accuracy: 91.90
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/deit/deit-tiny-distilled_3rdparty_pt-4xb256_in1k_20211216-c429839a.pth
+ Converted From:
+ Weights: https://dl.fbaipublicfiles.com/deit/deit_tiny_distilled_patch16_224-b40b3cf7.pth
+ Code: https://github.com/facebookresearch/deit/blob/f5123946205daf72a88783dae94cabff98c49c55/models.py#L108
+ Config: configs/deit/deit-tiny-distilled_pt-4xb256_in1k.py
+ - Name: deit-small_pt-4xb256_in1k
+ Metadata:
+ FLOPs: 4240000000
+ Parameters: 22050000
+ In Collection: DeiT
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 80.69
+ Top 5 Accuracy: 95.06
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/deit/deit-small_pt-4xb256_in1k_20220218-9425b9bb.pth
+ Config: configs/deit/deit-small_pt-4xb256_in1k.py
+ - Name: deit-small-distilled_3rdparty_pt-4xb256_in1k
+ Metadata:
+ FLOPs: 4240000000
+ Parameters: 22050000
+ In Collection: DeiT
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 81.17
+ Top 5 Accuracy: 95.40
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/deit/deit-small-distilled_3rdparty_pt-4xb256_in1k_20211216-4de1d725.pth
+ Converted From:
+ Weights: https://dl.fbaipublicfiles.com/deit/deit_small_distilled_patch16_224-649709d9.pth
+ Code: https://github.com/facebookresearch/deit/blob/f5123946205daf72a88783dae94cabff98c49c55/models.py#L123
+ Config: configs/deit/deit-small-distilled_pt-4xb256_in1k.py
+ - Name: deit-base_pt-16xb64_in1k
+ Metadata:
+ FLOPs: 16860000000
+ Parameters: 86570000
+ In Collection: DeiT
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 81.76
+ Top 5 Accuracy: 95.81
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/deit/deit-base_pt-16xb64_in1k_20220216-db63c16c.pth
+ Config: configs/deit/deit-base_pt-16xb64_in1k.py
+ - Name: deit-base_3rdparty_pt-16xb64_in1k
+ Metadata:
+ FLOPs: 16860000000
+ Parameters: 86570000
+ In Collection: DeiT
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 81.79
+ Top 5 Accuracy: 95.59
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/deit/deit-base_3rdparty_pt-16xb64_in1k_20211124-6f40c188.pth
+ Converted From:
+ Weights: https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth
+ Code: https://github.com/facebookresearch/deit/blob/f5123946205daf72a88783dae94cabff98c49c55/models.py#L93
+ Config: configs/deit/deit-base_pt-16xb64_in1k.py
+ - Name: deit-base-distilled_3rdparty_pt-16xb64_in1k
+ Metadata:
+ FLOPs: 16860000000
+ Parameters: 86570000
+ In Collection: DeiT
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 83.33
+ Top 5 Accuracy: 96.49
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/deit/deit-base-distilled_3rdparty_pt-16xb64_in1k_20211216-42891296.pth
+ Converted From:
+ Weights: https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_224-df68dfff.pth
+ Code: https://github.com/facebookresearch/deit/blob/f5123946205daf72a88783dae94cabff98c49c55/models.py#L138
+ Config: configs/deit/deit-base-distilled_pt-16xb64_in1k.py
+ - Name: deit-base_3rdparty_ft-16xb32_in1k-384px
+ Metadata:
+ FLOPs: 49370000000
+ Parameters: 86860000
+ In Collection: DeiT
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 83.04
+ Top 5 Accuracy: 96.31
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/deit/deit-base_3rdparty_ft-16xb32_in1k-384px_20211124-822d02f2.pth
+ Converted From:
+ Weights: https://dl.fbaipublicfiles.com/deit/deit_base_patch16_384-8de9b5d1.pth
+ Code: https://github.com/facebookresearch/deit/blob/f5123946205daf72a88783dae94cabff98c49c55/models.py#L153
+ Config: configs/deit/deit-base_ft-16xb32_in1k-384px.py
+ - Name: deit-base-distilled_3rdparty_ft-16xb32_in1k-384px
+ Metadata:
+ FLOPs: 49370000000
+ Parameters: 86860000
+ In Collection: DeiT
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 85.55
+ Top 5 Accuracy: 97.35
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/deit/deit-base-distilled_3rdparty_ft-16xb32_in1k-384px_20211216-e48d6000.pth
+ Converted From:
+ Weights: https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_384-d0272ac0.pth
+ Code: https://github.com/facebookresearch/deit/blob/f5123946205daf72a88783dae94cabff98c49c55/models.py#L168
+ Config: configs/deit/deit-base-distilled_ft-16xb32_in1k-384px.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/densenet/README.md b/openmmlab_test/mmclassification-0.24.1/configs/densenet/README.md
new file mode 100644
index 00000000..f07f25c9
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/densenet/README.md
@@ -0,0 +1,41 @@
+# DenseNet
+
+> [Densely Connected Convolutional Networks](https://arxiv.org/abs/1608.06993)
+
+
+
+## Abstract
+
+Recent work has shown that convolutional networks can be substantially deeper, more accurate, and efficient to train if they contain shorter connections between layers close to the input and those close to the output. In this paper, we embrace this observation and introduce the Dense Convolutional Network (DenseNet), which connects each layer to every other layer in a feed-forward fashion. Whereas traditional convolutional networks with L layers have L connections - one between each layer and its subsequent layer - our network has L(L+1)/2 direct connections. For each layer, the feature-maps of all preceding layers are used as inputs, and its own feature-maps are used as inputs into all subsequent layers. DenseNets have several compelling advantages: they alleviate the vanishing-gradient problem, strengthen feature propagation, encourage feature reuse, and substantially reduce the number of parameters. We evaluate our proposed architecture on four highly competitive object recognition benchmark tasks (CIFAR-10, CIFAR-100, SVHN, and ImageNet). DenseNets obtain significant improvements over the state-of-the-art on most of them, whilst requiring less computation to achieve high performance.
+
+
+
+
+
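+Dense connectivity is easy to express in code. Below is a minimal PyTorch-style sketch of a dense block (illustrative only; `TinyDenseBlock` is a hypothetical name, not the MMClassification implementation):
+
+```python
+import torch
+import torch.nn as nn
+
+
+class TinyDenseBlock(nn.Module):
+    """Layer i consumes the concatenation of the input and all i earlier
+    feature maps, so a block of L layers has L(L+1)/2 direct connections."""
+
+    def __init__(self, in_channels, growth_rate, num_layers):
+        super().__init__()
+        self.layers = nn.ModuleList(
+            nn.Sequential(
+                nn.BatchNorm2d(in_channels + i * growth_rate),
+                nn.ReLU(inplace=True),
+                nn.Conv2d(in_channels + i * growth_rate, growth_rate,
+                          kernel_size=3, padding=1, bias=False),
+            ) for i in range(num_layers))
+
+    def forward(self, x):
+        features = [x]
+        for layer in self.layers:
+            # each new feature map is computed from *all* earlier ones
+            features.append(layer(torch.cat(features, dim=1)))
+        return torch.cat(features, dim=1)
+
+
+# 64 input channels + 4 layers x growth rate 32 -> 192 output channels
+block = TinyDenseBlock(64, growth_rate=32, num_layers=4)
+print(block(torch.randn(2, 64, 56, 56)).shape)  # torch.Size([2, 192, 56, 56])
+```
+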
+## Results and models
+
+### ImageNet-1k
+
+| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+| :-----------: | :-------: | :------: | :-------: | :-------: | :-------------------------------------------------------------------------: | :---------------------------------------------------------------------------: |
+| DenseNet121\* | 7.98 | 2.88 | 74.96 | 92.21 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/densenet/densenet121_4xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/densenet/densenet121_4xb256_in1k_20220426-07450f99.pth) |
+| DenseNet169\* | 14.15 | 3.42 | 76.08 | 93.11 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/densenet/densenet169_4xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/densenet/densenet169_4xb256_in1k_20220426-a2889902.pth) |
+| DenseNet201\* | 20.01 | 4.37 | 77.32 | 93.64 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/densenet/densenet201_4xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/densenet/densenet201_4xb256_in1k_20220426-05cae4ef.pth) |
+| DenseNet161\* | 28.68 | 7.82 | 77.61 | 93.83 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/densenet/densenet161_4xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/densenet/densenet161_4xb256_in1k_20220426-ee6a80a9.pth) |
+
+*Models with * are converted from [torchvision](https://pytorch.org/vision/stable/models.html), following the [original repo](https://github.com/liuzhuang13/DenseNet). The config files of these models are for inference only; we cannot guarantee their training accuracy, and contributions of reproduction results are welcome.*
+
+## Citation
+
+```bibtex
+@misc{https://doi.org/10.48550/arxiv.1608.06993,
+ doi = {10.48550/ARXIV.1608.06993},
+ url = {https://arxiv.org/abs/1608.06993},
+ author = {Huang, Gao and Liu, Zhuang and van der Maaten, Laurens and Weinberger, Kilian Q.},
+ keywords = {Computer Vision and Pattern Recognition (cs.CV), Machine Learning (cs.LG), FOS: Computer and information sciences},
+ title = {Densely Connected Convolutional Networks},
+ publisher = {arXiv},
+ year = {2016},
+ copyright = {arXiv.org perpetual, non-exclusive license}
+}
+```
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/densenet/densenet121_4xb256_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/densenet/densenet121_4xb256_in1k.py
new file mode 100644
index 00000000..08d65ae2
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/densenet/densenet121_4xb256_in1k.py
@@ -0,0 +1,10 @@
+_base_ = [
+ '../_base_/models/densenet/densenet121.py',
+ '../_base_/datasets/imagenet_bs64.py',
+ '../_base_/schedules/imagenet_bs256.py',
+ '../_base_/default_runtime.py',
+]
+
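+# 4 GPUs x 256 samples per GPU (the "4xb256" in the file name), overriding
+# the samples_per_gpu=64 set by the imagenet_bs64 base dataset config.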
+data = dict(samples_per_gpu=256)
+
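+# 90 epochs, matching the ImageNet schedule of the original DenseNet paper.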
+runner = dict(type='EpochBasedRunner', max_epochs=90)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/densenet/densenet161_4xb256_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/densenet/densenet161_4xb256_in1k.py
new file mode 100644
index 00000000..4581d1de
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/densenet/densenet161_4xb256_in1k.py
@@ -0,0 +1,10 @@
+_base_ = [
+ '../_base_/models/densenet/densenet161.py',
+ '../_base_/datasets/imagenet_bs64.py',
+ '../_base_/schedules/imagenet_bs256.py',
+ '../_base_/default_runtime.py',
+]
+
+data = dict(samples_per_gpu=256)
+
+runner = dict(type='EpochBasedRunner', max_epochs=90)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/densenet/densenet169_4xb256_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/densenet/densenet169_4xb256_in1k.py
new file mode 100644
index 00000000..6179293b
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/densenet/densenet169_4xb256_in1k.py
@@ -0,0 +1,10 @@
+_base_ = [
+ '../_base_/models/densenet/densenet169.py',
+ '../_base_/datasets/imagenet_bs64.py',
+ '../_base_/schedules/imagenet_bs256.py',
+ '../_base_/default_runtime.py',
+]
+
+data = dict(samples_per_gpu=256)
+
+runner = dict(type='EpochBasedRunner', max_epochs=90)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/densenet/densenet201_4xb256_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/densenet/densenet201_4xb256_in1k.py
new file mode 100644
index 00000000..897a141d
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/densenet/densenet201_4xb256_in1k.py
@@ -0,0 +1,10 @@
+_base_ = [
+ '../_base_/models/densenet/densenet201.py',
+ '../_base_/datasets/imagenet_bs64.py',
+ '../_base_/schedules/imagenet_bs256.py',
+ '../_base_/default_runtime.py',
+]
+
+data = dict(samples_per_gpu=256)
+
+runner = dict(type='EpochBasedRunner', max_epochs=90)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/densenet/metafile.yml b/openmmlab_test/mmclassification-0.24.1/configs/densenet/metafile.yml
new file mode 100644
index 00000000..84366b23
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/densenet/metafile.yml
@@ -0,0 +1,76 @@
+Collections:
+ - Name: DenseNet
+ Metadata:
+ Training Data: ImageNet-1k
+ Architecture:
+ - DenseBlock
+ Paper:
+ URL: https://arxiv.org/abs/1608.06993
+ Title: Densely Connected Convolutional Networks
+ README: configs/densenet/README.md
+
+Models:
+ - Name: densenet121_4xb256_in1k
+ Metadata:
+ FLOPs: 2881695488
+ Parameters: 7978856
+ In Collection: DenseNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 74.96
+ Top 5 Accuracy: 92.21
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/densenet/densenet121_4xb256_in1k_20220426-07450f99.pth
+ Config: configs/densenet/densenet121_4xb256_in1k.py
+ Converted From:
+ Weights: https://download.pytorch.org/models/densenet121-a639ec97.pth
+ Code: https://github.com/pytorch/vision/blob/main/torchvision/models/densenet.py
+ - Name: densenet169_4xb256_in1k
+ Metadata:
+ FLOPs: 3416860160
+ Parameters: 14149480
+ In Collection: DenseNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 76.08
+ Top 5 Accuracy: 93.11
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/densenet/densenet169_4xb256_in1k_20220426-a2889902.pth
+ Config: configs/densenet/densenet169_4xb256_in1k.py
+ Converted From:
+ Weights: https://download.pytorch.org/models/densenet169-b2777c0a.pth
+ Code: https://github.com/pytorch/vision/blob/main/torchvision/models/densenet.py
+ - Name: densenet201_4xb256_in1k
+ Metadata:
+ FLOPs: 4365236736
+ Parameters: 20013928
+ In Collection: DenseNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 77.32
+ Top 5 Accuracy: 93.64
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/densenet/densenet201_4xb256_in1k_20220426-05cae4ef.pth
+ Config: configs/densenet/densenet201_4xb256_in1k.py
+ Converted From:
+ Weights: https://download.pytorch.org/models/densenet201-c1103571.pth
+ Code: https://github.com/pytorch/vision/blob/main/torchvision/models/densenet.py
+ - Name: densenet161_4xb256_in1k
+ Metadata:
+ FLOPs: 7816363968
+ Parameters: 28681000
+ In Collection: DenseNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 77.61
+ Top 5 Accuracy: 93.83
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/densenet/densenet161_4xb256_in1k_20220426-ee6a80a9.pth
+ Config: configs/densenet/densenet161_4xb256_in1k.py
+ Converted From:
+ Weights: https://download.pytorch.org/models/densenet161-8d451a50.pth
+ Code: https://github.com/pytorch/vision/blob/main/torchvision/models/densenet.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/efficientformer/README.md b/openmmlab_test/mmclassification-0.24.1/configs/efficientformer/README.md
new file mode 100644
index 00000000..ecd6b492
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/efficientformer/README.md
@@ -0,0 +1,47 @@
+# EfficientFormer
+
+> [EfficientFormer: Vision Transformers at MobileNet Speed](https://arxiv.org/abs/2206.01191)
+
+
+
+## Abstract
+
+Vision Transformers (ViT) have shown rapid progress in computer vision tasks, achieving promising results on various benchmarks. However, due to the massive number of parameters and model design, e.g., attention mechanism, ViT-based models are generally times slower than lightweight convolutional networks. Therefore, the deployment of ViT for real-time applications is particularly challenging, especially on resource-constrained hardware such as mobile devices. Recent efforts try to reduce the computation complexity of ViT through network architecture search or hybrid design with MobileNet block, yet the inference speed is still unsatisfactory. This leads to an important question: can transformers run as fast as MobileNet while obtaining high performance? To answer this, we first revisit the network architecture and operators used in ViT-based models and identify inefficient designs. Then we introduce a dimension-consistent pure transformer (without MobileNet blocks) as a design paradigm. Finally, we perform latency-driven slimming to get a series of final models dubbed EfficientFormer. Extensive experiments show the superiority of EfficientFormer in performance and speed on mobile devices. Our fastest model, EfficientFormer-L1, achieves 79.2% top-1 accuracy on ImageNet-1K with only 1.6 ms inference latency on iPhone 12 (compiled with CoreML), which runs as fast as MobileNetV2×1.4 (1.6 ms, 74.7% top-1), and our largest model, EfficientFormer-L7, obtains 83.3% accuracy with only 7.0 ms latency. Our work proves that properly designed transformers can reach extremely low latency on mobile devices while maintaining high performance.
+
+
+
+
+
+## Results and models
+
+### ImageNet-1k
+
+| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+| :------------------: | :-------: | :------: | :-------: | :-------: | :---------------------------------------------------------------------: | :------------------------------------------------------------------------: |
+| EfficientFormer-l1\* | 12.19 | 1.30 | 80.46 | 94.99 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientformer/efficientformer-l1_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientformer/efficientformer-l1_3rdparty_in1k_20220803-d66e61df.pth) |
+| EfficientFormer-l3\* | 31.41 | 3.93 | 82.45 | 96.18 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientformer/efficientformer-l3_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientformer/efficientformer-l3_3rdparty_in1k_20220803-dde1c8c5.pth) |
+| EfficientFormer-l7\* | 82.23 | 10.16 | 83.40 | 96.60 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientformer/efficientformer-l7_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientformer/efficientformer-l7_3rdparty_in1k_20220803-41a552bb.pth) |
+
+*Models with * are converted from the [official repo](https://github.com/snap-research/EfficientFormer). The config files of these models are for inference only; we cannot guarantee their training accuracy, and contributions of reproduction results are welcome.*
+
+## Citation
+
+```bibtex
+@misc{https://doi.org/10.48550/arxiv.2206.01191,
+ doi = {10.48550/ARXIV.2206.01191},
+ url = {https://arxiv.org/abs/2206.01191},
+ author = {Li, Yanyu and Yuan, Geng and Wen, Yang and Hu, Eric and Evangelidis, Georgios and Tulyakov, Sergey and Wang, Yanzhi and Ren, Jian},
+ keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences},
+ title = {EfficientFormer: Vision Transformers at MobileNet Speed},
+ publisher = {arXiv},
+ year = {2022},
+ copyright = {Creative Commons Attribution 4.0 International}
+}
+```
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/efficientformer/efficientformer-l1_8xb128_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/efficientformer/efficientformer-l1_8xb128_in1k.py
new file mode 100644
index 00000000..f5db2bfc
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/efficientformer/efficientformer-l1_8xb128_in1k.py
@@ -0,0 +1,24 @@
+_base_ = [
+ '../_base_/datasets/imagenet_bs128_poolformer_small_224.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py',
+]
+
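+# The EfficientFormer-L1 backbone ends with 448 channels, matching the head's
+# in_channels below; the l3 and l7 configs use 512 and 768 respectively.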
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='EfficientFormer',
+ arch='l1',
+ drop_path_rate=0,
+ init_cfg=[
+ dict(
+ type='TruncNormal',
+ layer=['Conv2d', 'Linear'],
+ std=.02,
+ bias=0.),
+ dict(type='Constant', layer=['GroupNorm'], val=1., bias=0.),
+ dict(type='Constant', layer=['LayerScale'], val=1e-5)
+ ]),
+ neck=dict(type='GlobalAveragePooling', dim=1),
+ head=dict(
+ type='EfficientFormerClsHead', in_channels=448, num_classes=1000))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/efficientformer/efficientformer-l3_8xb128_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/efficientformer/efficientformer-l3_8xb128_in1k.py
new file mode 100644
index 00000000..e920f785
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/efficientformer/efficientformer-l3_8xb128_in1k.py
@@ -0,0 +1,24 @@
+_base_ = [
+ '../_base_/datasets/imagenet_bs128_poolformer_small_224.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py',
+]
+
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='EfficientFormer',
+ arch='l3',
+ drop_path_rate=0,
+ init_cfg=[
+ dict(
+ type='TruncNormal',
+ layer=['Conv2d', 'Linear'],
+ std=.02,
+ bias=0.),
+ dict(type='Constant', layer=['GroupNorm'], val=1., bias=0.),
+ dict(type='Constant', layer=['LayerScale'], val=1e-5)
+ ]),
+ neck=dict(type='GlobalAveragePooling', dim=1),
+ head=dict(
+ type='EfficientFormerClsHead', in_channels=512, num_classes=1000))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/efficientformer/efficientformer-l7_8xb128_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/efficientformer/efficientformer-l7_8xb128_in1k.py
new file mode 100644
index 00000000..a59e3a7e
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/efficientformer/efficientformer-l7_8xb128_in1k.py
@@ -0,0 +1,24 @@
+_base_ = [
+ '../_base_/datasets/imagenet_bs128_poolformer_small_224.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py',
+]
+
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='EfficientFormer',
+ arch='l7',
+ drop_path_rate=0,
+ init_cfg=[
+ dict(
+ type='TruncNormal',
+ layer=['Conv2d', 'Linear'],
+ std=.02,
+ bias=0.),
+ dict(type='Constant', layer=['GroupNorm'], val=1., bias=0.),
+ dict(type='Constant', layer=['LayerScale'], val=1e-5)
+ ]),
+ neck=dict(type='GlobalAveragePooling', dim=1),
+ head=dict(
+ type='EfficientFormerClsHead', in_channels=768, num_classes=1000))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/efficientformer/metafile.yml b/openmmlab_test/mmclassification-0.24.1/configs/efficientformer/metafile.yml
new file mode 100644
index 00000000..33c47865
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/efficientformer/metafile.yml
@@ -0,0 +1,67 @@
+Collections:
+ - Name: EfficientFormer
+ Metadata:
+ Training Data: ImageNet-1k
+ Architecture:
+ - Pooling
+ - 1x1 Convolution
+ - LayerScale
+ - MetaFormer
+ Paper:
+ URL: https://arxiv.org/pdf/2206.01191.pdf
+ Title: "EfficientFormer: Vision Transformers at MobileNet Speed"
+ README: configs/efficientformer/README.md
+ Code:
+ Version: v0.24.0
+ URL: https://github.com/open-mmlab/mmclassification/blob/v0.24.0/mmcls/models/backbones/efficientformer.py
+
+Models:
+ - Name: efficientformer-l1_3rdparty_8xb128_in1k
+ Metadata:
+ FLOPs: 1304601088 # 1.3G
+ Parameters: 12278696 # 12M
+ In Collection: EfficientFormer
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 80.46
+ Top 5 Accuracy: 94.99
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/efficientformer/efficientformer-l1_3rdparty_in1k_20220803-d66e61df.pth
+ Config: configs/efficientformer/efficientformer-l1_8xb128_in1k.py
+ Converted From:
+ Weights: https://drive.google.com/file/d/11SbX-3cfqTOc247xKYubrAjBiUmr818y/view?usp=sharing
+ Code: https://github.com/snap-research/EfficientFormer
+ - Name: efficientformer-l3_3rdparty_8xb128_in1k
+ Metadata:
+ FLOPs: 3737045760 # 3.7G
+ Parameters: 31406000 # 31M
+ In Collection: EfficientFormer
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 82.45
+ Top 5 Accuracy: 96.18
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/efficientformer/efficientformer-l3_3rdparty_in1k_20220803-dde1c8c5.pth
+ Config: configs/efficientformer/efficientformer-l3_8xb128_in1k.py
+ Converted From:
+ Weights: https://drive.google.com/file/d/1OyyjKKxDyMj-BcfInp4GlDdwLu3hc30m/view?usp=sharing
+ Code: https://github.com/snap-research/EfficientFormer
+ - Name: efficientformer-l7_3rdparty_8xb128_in1k
+ Metadata:
+ FLOPs: 10163951616 # 10.2G
+ Parameters: 82229328 # 82M
+ In Collection: EfficientFormer
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 83.40
+ Top 5 Accuracy: 96.60
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/efficientformer/efficientformer-l7_3rdparty_in1k_20220803-41a552bb.pth
+ Config: configs/efficientformer/efficientformer-l7_8xb128_in1k.py
+ Converted From:
+ Weights: https://drive.google.com/file/d/1cVw-pctJwgvGafeouynqWWCwgkcoFMM5/view?usp=sharing
+ Code: https://github.com/snap-research/EfficientFormer
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/README.md b/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/README.md
new file mode 100644
index 00000000..832f5c6b
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/README.md
@@ -0,0 +1,62 @@
+# EfficientNet
+
+> [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946v5)
+
+
+
+## Abstract
+
+Convolutional Neural Networks (ConvNets) are commonly developed at a fixed resource budget, and then scaled up for better accuracy if more resources are available. In this paper, we systematically study model scaling and identify that carefully balancing network depth, width, and resolution can lead to better performance. Based on this observation, we propose a new scaling method that uniformly scales all dimensions of depth/width/resolution using a simple yet highly effective compound coefficient. We demonstrate the effectiveness of this method on scaling up MobileNets and ResNet. To go even further, we use neural architecture search to design a new baseline network and scale it up to obtain a family of models, called EfficientNets, which achieve much better accuracy and efficiency than previous ConvNets. In particular, our EfficientNet-B7 achieves state-of-the-art 84.3% top-1 accuracy on ImageNet, while being 8.4x smaller and 6.1x faster on inference than the best existing ConvNet. Our EfficientNets also transfer well and achieve state-of-the-art accuracy on CIFAR-100 (91.7%), Flowers (98.8%), and 3 other transfer learning datasets, with an order of magnitude fewer parameters.
+
+
+
+
+
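+The compound scaling rule from the abstract can be sketched in a few lines (alpha, beta, gamma below are the grid-searched constants reported in the paper; the released B1-B7 models round the resulting coefficients, so treat the outputs as approximations):
+
+```python
+# depth ~ alpha**phi, width ~ beta**phi, resolution ~ gamma**phi, with
+# alpha * beta**2 * gamma**2 ~= 2 so that FLOPs grow by roughly 2**phi.
+ALPHA, BETA, GAMMA = 1.2, 1.1, 1.15
+
+
+def compound_scale(phi, base_resolution=224):
+    depth_mult = ALPHA ** phi                           # more layers
+    width_mult = BETA ** phi                            # wider layers
+    resolution = round(base_resolution * GAMMA ** phi)  # larger inputs
+    return depth_mult, width_mult, resolution
+
+
+for phi in range(4):
+    d, w, r = compound_scale(phi)
+    print(f'phi={phi}: depth x{d:.2f}, width x{w:.2f}, input ~{r}px')
+```
+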
+## Results and models
+
+### ImageNet-1k
+
+In the result table, AA means the model is trained with AutoAugment pre-processing (see the [AutoAugment paper](https://arxiv.org/abs/1805.09501) for details), and AdvProp means the model is trained with adversarial examples (see the [AdvProp paper](https://arxiv.org/abs/1911.09665) for details).
+
+Note: MMClassification supports training with AutoAugment, but does not support AdvProp yet.
+
+| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+| :------------------------------: | :-------: | :------: | :-------: | :-------: | :---------------------------------------------------------------: | :------------------------------------------------------------------: |
+| EfficientNet-B0\* | 5.29 | 0.02 | 76.74 | 93.17 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b0_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b0_3rdparty_8xb32_in1k_20220119-a7e2a0b1.pth) |
+| EfficientNet-B0 (AA)\* | 5.29 | 0.02 | 77.26 | 93.41 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b0_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b0_3rdparty_8xb32-aa_in1k_20220119-8d939117.pth) |
+| EfficientNet-B0 (AA + AdvProp)\* | 5.29 | 0.02 | 77.53 | 93.61 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b0_8xb32-01norm_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b0_3rdparty_8xb32-aa-advprop_in1k_20220119-26434485.pth) |
+| EfficientNet-B1\* | 7.79 | 0.03 | 78.68 | 94.28 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b1_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b1_3rdparty_8xb32_in1k_20220119-002556d9.pth) |
+| EfficientNet-B1 (AA)\* | 7.79 | 0.03 | 79.20 | 94.42 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b1_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b1_3rdparty_8xb32-aa_in1k_20220119-619d8ae3.pth) |
+| EfficientNet-B1 (AA + AdvProp)\* | 7.79 | 0.03 | 79.52 | 94.43 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b1_8xb32-01norm_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b1_3rdparty_8xb32-aa-advprop_in1k_20220119-5715267d.pth) |
+| EfficientNet-B2\* | 9.11 | 0.03 | 79.64 | 94.80 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b2_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b2_3rdparty_8xb32_in1k_20220119-ea374a30.pth) |
+| EfficientNet-B2 (AA)\* | 9.11 | 0.03 | 80.21 | 94.96 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b2_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b2_3rdparty_8xb32-aa_in1k_20220119-dd61e80b.pth) |
+| EfficientNet-B2 (AA + AdvProp)\* | 9.11 | 0.03 | 80.45 | 95.07 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b2_8xb32-01norm_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b2_3rdparty_8xb32-aa-advprop_in1k_20220119-1655338a.pth) |
+| EfficientNet-B3\* | 12.23 | 0.06 | 81.01 | 95.34 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b3_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32_in1k_20220119-4b4d7487.pth) |
+| EfficientNet-B3 (AA)\* | 12.23 | 0.06 | 81.58 | 95.67 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b3_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32-aa_in1k_20220119-5b4887a0.pth) |
+| EfficientNet-B3 (AA + AdvProp)\* | 12.23 | 0.06 | 81.81 | 95.69 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b3_8xb32-01norm_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32-aa-advprop_in1k_20220119-53b41118.pth) |
+| EfficientNet-B4\* | 19.34 | 0.12 | 82.57 | 96.09 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b4_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b4_3rdparty_8xb32_in1k_20220119-81fd4077.pth) |
+| EfficientNet-B4 (AA)\* | 19.34 | 0.12 | 82.95 | 96.26 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b4_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b4_3rdparty_8xb32-aa_in1k_20220119-45b8bd2b.pth) |
+| EfficientNet-B4 (AA + AdvProp)\* | 19.34 | 0.12 | 83.25 | 96.44 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b4_8xb32-01norm_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b4_3rdparty_8xb32-aa-advprop_in1k_20220119-38c2238c.pth) |
+| EfficientNet-B5\* | 30.39 | 0.24 | 83.18 | 96.47 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b5_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b5_3rdparty_8xb32_in1k_20220119-e9814430.pth) |
+| EfficientNet-B5 (AA)\* | 30.39 | 0.24 | 83.82 | 96.76 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b5_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b5_3rdparty_8xb32-aa_in1k_20220119-2cab8b78.pth) |
+| EfficientNet-B5 (AA + AdvProp)\* | 30.39 | 0.24 | 84.21 | 96.98 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b5_8xb32-01norm_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b5_3rdparty_8xb32-aa-advprop_in1k_20220119-f57a895a.pth) |
+| EfficientNet-B6 (AA)\* | 43.04 | 0.41 | 84.05 | 96.82 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b6_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b6_3rdparty_8xb32-aa_in1k_20220119-45b03310.pth) |
+| EfficientNet-B6 (AA + AdvProp)\* | 43.04 | 0.41 | 84.74 | 97.14 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b6_8xb32-01norm_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b6_3rdparty_8xb32-aa-advprop_in1k_20220119-bfe3485e.pth) |
+| EfficientNet-B7 (AA)\* | 66.35 | 0.72 | 84.38 | 96.88 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b7_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b7_3rdparty_8xb32-aa_in1k_20220119-bf03951c.pth) |
+| EfficientNet-B7 (AA + AdvProp)\* | 66.35 | 0.72 | 85.14 | 97.23 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b7_8xb32-01norm_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b7_3rdparty_8xb32-aa-advprop_in1k_20220119-c6dbff10.pth) |
+| EfficientNet-B8 (AA + AdvProp)\* | 87.41 | 1.09 | 85.38 | 97.28 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b8_8xb32-01norm_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b8_3rdparty_8xb32-aa-advprop_in1k_20220119-297ce1b7.pth) |
+
+*Models with * are converted from the [official repo](https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet). The config files of these models are for inference only; we cannot guarantee their training accuracy, and contributions of reproduction results are welcome.*
+
+## Citation
+
+```bibtex
+@inproceedings{tan2019efficientnet,
+ title={Efficientnet: Rethinking model scaling for convolutional neural networks},
+ author={Tan, Mingxing and Le, Quoc},
+ booktitle={International Conference on Machine Learning},
+ pages={6105--6114},
+ year={2019},
+ organization={PMLR}
+}
+```
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b0_8xb32-01norm_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b0_8xb32-01norm_in1k.py
new file mode 100644
index 00000000..fbb490d9
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b0_8xb32-01norm_in1k.py
@@ -0,0 +1,39 @@
+_base_ = [
+ '../_base_/models/efficientnet_b0.py',
+ '../_base_/datasets/imagenet_bs32.py',
+ '../_base_/schedules/imagenet_bs256.py',
+ '../_base_/default_runtime.py',
+]
+
+# dataset settings
+dataset_type = 'ImageNet'
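+# The "01norm" variant normalizes pixels from [0, 255] to [-1, 1]
+# (mean = std = 127.5); it is paired with the AdvProp-trained weights
+# in the README table.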
+img_norm_cfg = dict(
+ mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandomResizedCrop',
+ size=224,
+ efficientnet_style=True,
+ interpolation='bicubic'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
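+# efficientnet_style=True makes CenterCrop follow the original TF EfficientNet
+# evaluation protocol, where the center crop covers roughly
+# crop_size / (crop_size + 32) of the shorter image side before the bicubic
+# resize to crop_size.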
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='CenterCrop',
+ crop_size=224,
+ efficientnet_style=True,
+ interpolation='bicubic'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ train=dict(pipeline=train_pipeline),
+ val=dict(pipeline=test_pipeline),
+ test=dict(pipeline=test_pipeline))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b0_8xb32_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b0_8xb32_in1k.py
new file mode 100644
index 00000000..33931e5f
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b0_8xb32_in1k.py
@@ -0,0 +1,39 @@
+_base_ = [
+ '../_base_/models/efficientnet_b0.py',
+ '../_base_/datasets/imagenet_bs32.py',
+ '../_base_/schedules/imagenet_bs256.py',
+ '../_base_/default_runtime.py',
+]
+
+# dataset settings
+dataset_type = 'ImageNet'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandomResizedCrop',
+ size=224,
+ efficientnet_style=True,
+ interpolation='bicubic'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='CenterCrop',
+ crop_size=224,
+ efficientnet_style=True,
+ interpolation='bicubic'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ train=dict(pipeline=train_pipeline),
+ val=dict(pipeline=test_pipeline),
+ test=dict(pipeline=test_pipeline))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b1_8xb32-01norm_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b1_8xb32-01norm_in1k.py
new file mode 100644
index 00000000..6b66395c
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b1_8xb32-01norm_in1k.py
@@ -0,0 +1,39 @@
+_base_ = [
+ '../_base_/models/efficientnet_b1.py',
+ '../_base_/datasets/imagenet_bs32.py',
+ '../_base_/schedules/imagenet_bs256.py',
+ '../_base_/default_runtime.py',
+]
+
+# dataset settings
+dataset_type = 'ImageNet'
+img_norm_cfg = dict(
+ mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandomResizedCrop',
+ size=240,
+ efficientnet_style=True,
+ interpolation='bicubic'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='CenterCrop',
+ crop_size=240,
+ efficientnet_style=True,
+ interpolation='bicubic'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ train=dict(pipeline=train_pipeline),
+ val=dict(pipeline=test_pipeline),
+ test=dict(pipeline=test_pipeline))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b1_8xb32_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b1_8xb32_in1k.py
new file mode 100644
index 00000000..d702a150
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b1_8xb32_in1k.py
@@ -0,0 +1,39 @@
+_base_ = [
+ '../_base_/models/efficientnet_b1.py',
+ '../_base_/datasets/imagenet_bs32.py',
+ '../_base_/schedules/imagenet_bs256.py',
+ '../_base_/default_runtime.py',
+]
+
+# dataset settings
+dataset_type = 'ImageNet'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandomResizedCrop',
+ size=240,
+ efficientnet_style=True,
+ interpolation='bicubic'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='CenterCrop',
+ crop_size=240,
+ efficientnet_style=True,
+ interpolation='bicubic'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ train=dict(pipeline=train_pipeline),
+ val=dict(pipeline=test_pipeline),
+ test=dict(pipeline=test_pipeline))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b2_8xb32-01norm_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b2_8xb32-01norm_in1k.py
new file mode 100644
index 00000000..ae8cda84
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b2_8xb32-01norm_in1k.py
@@ -0,0 +1,39 @@
+_base_ = [
+ '../_base_/models/efficientnet_b2.py',
+ '../_base_/datasets/imagenet_bs32.py',
+ '../_base_/schedules/imagenet_bs256.py',
+ '../_base_/default_runtime.py',
+]
+
+# dataset settings
+dataset_type = 'ImageNet'
+img_norm_cfg = dict(
+ mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandomResizedCrop',
+ size=260,
+ efficientnet_style=True,
+ interpolation='bicubic'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='CenterCrop',
+ crop_size=260,
+ efficientnet_style=True,
+ interpolation='bicubic'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ train=dict(pipeline=train_pipeline),
+ val=dict(pipeline=test_pipeline),
+ test=dict(pipeline=test_pipeline))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b2_8xb32_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b2_8xb32_in1k.py
new file mode 100644
index 00000000..53f7c84d
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b2_8xb32_in1k.py
@@ -0,0 +1,39 @@
+_base_ = [
+ '../_base_/models/efficientnet_b2.py',
+ '../_base_/datasets/imagenet_bs32.py',
+ '../_base_/schedules/imagenet_bs256.py',
+ '../_base_/default_runtime.py',
+]
+
+# dataset settings
+dataset_type = 'ImageNet'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandomResizedCrop',
+ size=260,
+ efficientnet_style=True,
+ interpolation='bicubic'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='CenterCrop',
+ crop_size=260,
+ efficientnet_style=True,
+ interpolation='bicubic'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ train=dict(pipeline=train_pipeline),
+ val=dict(pipeline=test_pipeline),
+ test=dict(pipeline=test_pipeline))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b3_8xb32-01norm_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b3_8xb32-01norm_in1k.py
new file mode 100644
index 00000000..dfd3f92c
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b3_8xb32-01norm_in1k.py
@@ -0,0 +1,39 @@
+_base_ = [
+ '../_base_/models/efficientnet_b3.py',
+ '../_base_/datasets/imagenet_bs32.py',
+ '../_base_/schedules/imagenet_bs256.py',
+ '../_base_/default_runtime.py',
+]
+
+# dataset settings
+dataset_type = 'ImageNet'
+img_norm_cfg = dict(
+ mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandomResizedCrop',
+ size=300,
+ efficientnet_style=True,
+ interpolation='bicubic'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='CenterCrop',
+ crop_size=300,
+ efficientnet_style=True,
+ interpolation='bicubic'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ train=dict(pipeline=train_pipeline),
+ val=dict(pipeline=test_pipeline),
+ test=dict(pipeline=test_pipeline))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b3_8xb32_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b3_8xb32_in1k.py
new file mode 100644
index 00000000..28387138
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b3_8xb32_in1k.py
@@ -0,0 +1,39 @@
+_base_ = [
+ '../_base_/models/efficientnet_b3.py',
+ '../_base_/datasets/imagenet_bs32.py',
+ '../_base_/schedules/imagenet_bs256.py',
+ '../_base_/default_runtime.py',
+]
+
+# dataset settings
+dataset_type = 'ImageNet'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandomResizedCrop',
+ size=300,
+ efficientnet_style=True,
+ interpolation='bicubic'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='CenterCrop',
+ crop_size=300,
+ efficientnet_style=True,
+ interpolation='bicubic'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ train=dict(pipeline=train_pipeline),
+ val=dict(pipeline=test_pipeline),
+ test=dict(pipeline=test_pipeline))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b4_8xb32-01norm_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b4_8xb32-01norm_in1k.py
new file mode 100644
index 00000000..333a19ac
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b4_8xb32-01norm_in1k.py
@@ -0,0 +1,39 @@
+_base_ = [
+ '../_base_/models/efficientnet_b4.py',
+ '../_base_/datasets/imagenet_bs32.py',
+ '../_base_/schedules/imagenet_bs256.py',
+ '../_base_/default_runtime.py',
+]
+
+# dataset settings
+dataset_type = 'ImageNet'
+img_norm_cfg = dict(
+ mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandomResizedCrop',
+ size=380,
+ efficientnet_style=True,
+ interpolation='bicubic'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='CenterCrop',
+ crop_size=380,
+ efficientnet_style=True,
+ interpolation='bicubic'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ train=dict(pipeline=train_pipeline),
+ val=dict(pipeline=test_pipeline),
+ test=dict(pipeline=test_pipeline))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b4_8xb32_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b4_8xb32_in1k.py
new file mode 100644
index 00000000..82f06cde
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b4_8xb32_in1k.py
@@ -0,0 +1,39 @@
+_base_ = [
+ '../_base_/models/efficientnet_b4.py',
+ '../_base_/datasets/imagenet_bs32.py',
+ '../_base_/schedules/imagenet_bs256.py',
+ '../_base_/default_runtime.py',
+]
+
+# dataset settings
+dataset_type = 'ImageNet'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandomResizedCrop',
+ size=380,
+ efficientnet_style=True,
+ interpolation='bicubic'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='CenterCrop',
+ crop_size=380,
+ efficientnet_style=True,
+ interpolation='bicubic'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ train=dict(pipeline=train_pipeline),
+ val=dict(pipeline=test_pipeline),
+ test=dict(pipeline=test_pipeline))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b5_8xb32-01norm_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b5_8xb32-01norm_in1k.py
new file mode 100644
index 00000000..f66855c5
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b5_8xb32-01norm_in1k.py
@@ -0,0 +1,39 @@
+_base_ = [
+ '../_base_/models/efficientnet_b5.py',
+ '../_base_/datasets/imagenet_bs32.py',
+ '../_base_/schedules/imagenet_bs256.py',
+ '../_base_/default_runtime.py',
+]
+
+# dataset settings
+dataset_type = 'ImageNet'
+img_norm_cfg = dict(
+ mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandomResizedCrop',
+ size=456,
+ efficientnet_style=True,
+ interpolation='bicubic'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='CenterCrop',
+ crop_size=456,
+ efficientnet_style=True,
+ interpolation='bicubic'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ train=dict(pipeline=train_pipeline),
+ val=dict(pipeline=test_pipeline),
+ test=dict(pipeline=test_pipeline))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b5_8xb32_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b5_8xb32_in1k.py
new file mode 100644
index 00000000..9b0eaab0
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b5_8xb32_in1k.py
@@ -0,0 +1,39 @@
+_base_ = [
+ '../_base_/models/efficientnet_b5.py',
+ '../_base_/datasets/imagenet_bs32.py',
+ '../_base_/schedules/imagenet_bs256.py',
+ '../_base_/default_runtime.py',
+]
+
+# dataset settings
+dataset_type = 'ImageNet'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandomResizedCrop',
+ size=456,
+ efficientnet_style=True,
+ interpolation='bicubic'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='CenterCrop',
+ crop_size=456,
+ efficientnet_style=True,
+ interpolation='bicubic'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ train=dict(pipeline=train_pipeline),
+ val=dict(pipeline=test_pipeline),
+ test=dict(pipeline=test_pipeline))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b6_8xb32-01norm_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b6_8xb32-01norm_in1k.py
new file mode 100644
index 00000000..da64e0ec
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b6_8xb32-01norm_in1k.py
@@ -0,0 +1,39 @@
+_base_ = [
+ '../_base_/models/efficientnet_b6.py',
+ '../_base_/datasets/imagenet_bs32.py',
+ '../_base_/schedules/imagenet_bs256.py',
+ '../_base_/default_runtime.py',
+]
+
+# dataset settings
+dataset_type = 'ImageNet'
+img_norm_cfg = dict(
+ mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandomResizedCrop',
+ size=528,
+ efficientnet_style=True,
+ interpolation='bicubic'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='CenterCrop',
+ crop_size=528,
+ efficientnet_style=True,
+ interpolation='bicubic'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ train=dict(pipeline=train_pipeline),
+ val=dict(pipeline=test_pipeline),
+ test=dict(pipeline=test_pipeline))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b6_8xb32_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b6_8xb32_in1k.py
new file mode 100644
index 00000000..6e03bb4c
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b6_8xb32_in1k.py
@@ -0,0 +1,39 @@
+_base_ = [
+ '../_base_/models/efficientnet_b6.py',
+ '../_base_/datasets/imagenet_bs32.py',
+ '../_base_/schedules/imagenet_bs256.py',
+ '../_base_/default_runtime.py',
+]
+
+# dataset settings
+dataset_type = 'ImageNet'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandomResizedCrop',
+ size=528,
+ efficientnet_style=True,
+ interpolation='bicubic'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='CenterCrop',
+ crop_size=528,
+ efficientnet_style=True,
+ interpolation='bicubic'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ train=dict(pipeline=train_pipeline),
+ val=dict(pipeline=test_pipeline),
+ test=dict(pipeline=test_pipeline))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b7_8xb32-01norm_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b7_8xb32-01norm_in1k.py
new file mode 100644
index 00000000..27c19fc7
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b7_8xb32-01norm_in1k.py
@@ -0,0 +1,39 @@
+_base_ = [
+ '../_base_/models/efficientnet_b7.py',
+ '../_base_/datasets/imagenet_bs32.py',
+ '../_base_/schedules/imagenet_bs256.py',
+ '../_base_/default_runtime.py',
+]
+
+# dataset settings
+dataset_type = 'ImageNet'
+img_norm_cfg = dict(
+ mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandomResizedCrop',
+ size=600,
+ efficientnet_style=True,
+ interpolation='bicubic'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='CenterCrop',
+ crop_size=600,
+ efficientnet_style=True,
+ interpolation='bicubic'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ train=dict(pipeline=train_pipeline),
+ val=dict(pipeline=test_pipeline),
+ test=dict(pipeline=test_pipeline))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b7_8xb32_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b7_8xb32_in1k.py
new file mode 100644
index 00000000..5146383e
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b7_8xb32_in1k.py
@@ -0,0 +1,39 @@
+_base_ = [
+ '../_base_/models/efficientnet_b7.py',
+ '../_base_/datasets/imagenet_bs32.py',
+ '../_base_/schedules/imagenet_bs256.py',
+ '../_base_/default_runtime.py',
+]
+
+# dataset settings
+dataset_type = 'ImageNet'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandomResizedCrop',
+ size=600,
+ efficientnet_style=True,
+ interpolation='bicubic'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='CenterCrop',
+ crop_size=600,
+ efficientnet_style=True,
+ interpolation='bicubic'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ train=dict(pipeline=train_pipeline),
+ val=dict(pipeline=test_pipeline),
+ test=dict(pipeline=test_pipeline))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b8_8xb32-01norm_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b8_8xb32-01norm_in1k.py
new file mode 100644
index 00000000..25540a1a
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b8_8xb32-01norm_in1k.py
@@ -0,0 +1,39 @@
+_base_ = [
+ '../_base_/models/efficientnet_b8.py',
+ '../_base_/datasets/imagenet_bs32.py',
+ '../_base_/schedules/imagenet_bs256.py',
+ '../_base_/default_runtime.py',
+]
+
+# dataset settings
+dataset_type = 'ImageNet'
+img_norm_cfg = dict(
+ mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandomResizedCrop',
+ size=672,
+ efficientnet_style=True,
+ interpolation='bicubic'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='CenterCrop',
+ crop_size=672,
+ efficientnet_style=True,
+ interpolation='bicubic'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ train=dict(pipeline=train_pipeline),
+ val=dict(pipeline=test_pipeline),
+ test=dict(pipeline=test_pipeline))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b8_8xb32_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b8_8xb32_in1k.py
new file mode 100644
index 00000000..4ff28c01
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-b8_8xb32_in1k.py
@@ -0,0 +1,39 @@
+_base_ = [
+ '../_base_/models/efficientnet_b8.py',
+ '../_base_/datasets/imagenet_bs32.py',
+ '../_base_/schedules/imagenet_bs256.py',
+ '../_base_/default_runtime.py',
+]
+
+# dataset settings
+dataset_type = 'ImageNet'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandomResizedCrop',
+ size=672,
+ efficientnet_style=True,
+ interpolation='bicubic'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='CenterCrop',
+ crop_size=672,
+ efficientnet_style=True,
+ interpolation='bicubic'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ train=dict(pipeline=train_pipeline),
+ val=dict(pipeline=test_pipeline),
+ test=dict(pipeline=test_pipeline))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-em_8xb32-01norm_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-em_8xb32-01norm_in1k.py
new file mode 100644
index 00000000..faa53862
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-em_8xb32-01norm_in1k.py
@@ -0,0 +1,39 @@
+_base_ = [
+ '../_base_/models/efficientnet_em.py',
+ '../_base_/datasets/imagenet_bs32.py',
+ '../_base_/schedules/imagenet_bs256.py',
+ '../_base_/default_runtime.py',
+]
+
+# dataset settings
+dataset_type = 'ImageNet'
+img_norm_cfg = dict(
+ mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandomResizedCrop',
+ size=240,
+ efficientnet_style=True,
+ interpolation='bicubic'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='CenterCrop',
+ crop_size=240,
+ efficientnet_style=True,
+ interpolation='bicubic'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ train=dict(pipeline=train_pipeline),
+ val=dict(pipeline=test_pipeline),
+ test=dict(pipeline=test_pipeline))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-es_8xb32-01norm_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-es_8xb32-01norm_in1k.py
new file mode 100644
index 00000000..5f11746f
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/efficientnet-es_8xb32-01norm_in1k.py
@@ -0,0 +1,39 @@
+_base_ = [
+ '../_base_/models/efficientnet_es.py',
+ '../_base_/datasets/imagenet_bs32.py',
+ '../_base_/schedules/imagenet_bs256.py',
+ '../_base_/default_runtime.py',
+]
+
+# dataset settings
+dataset_type = 'ImageNet'
+img_norm_cfg = dict(
+ mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandomResizedCrop',
+ size=224,
+ efficientnet_style=True,
+ interpolation='bicubic'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='CenterCrop',
+ crop_size=224,
+ efficientnet_style=True,
+ interpolation='bicubic'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ train=dict(pipeline=train_pipeline),
+ val=dict(pipeline=test_pipeline),
+ test=dict(pipeline=test_pipeline))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/metafile.yml b/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/metafile.yml
new file mode 100644
index 00000000..c8bbf0dd
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/efficientnet/metafile.yml
@@ -0,0 +1,391 @@
+Collections:
+ - Name: EfficientNet
+ Metadata:
+ Training Data: ImageNet-1k
+ Architecture:
+ - 1x1 Convolution
+ - Average Pooling
+ - Convolution
+ - Dense Connections
+ - Dropout
+ - Inverted Residual Block
+ - RMSProp
+ - Squeeze-and-Excitation Block
+ - Swish
+ Paper:
+ URL: https://arxiv.org/abs/1905.11946v5
+ Title: "EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks"
+ README: configs/efficientnet/README.md
+ Code:
+ Version: v0.20.1
+ URL: https://github.com/open-mmlab/mmclassification/blob/v0.20.1/mmcls/models/backbones/efficientnet.py
+
+Models:
+ - Name: efficientnet-b0_3rdparty_8xb32_in1k
+ Metadata:
+ FLOPs: 16481180
+ Parameters: 5288548
+ In Collection: EfficientNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 76.74
+ Top 5 Accuracy: 93.17
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b0_3rdparty_8xb32_in1k_20220119-a7e2a0b1.pth
+ Config: configs/efficientnet/efficientnet-b0_8xb32_in1k.py
+ Converted From:
+ Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckpts/efficientnet-b0.tar.gz
+ Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet
+ - Name: efficientnet-b0_3rdparty_8xb32-aa_in1k
+ Metadata:
+ FLOPs: 16481180
+ Parameters: 5288548
+ In Collection: EfficientNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 77.26
+ Top 5 Accuracy: 93.41
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b0_3rdparty_8xb32-aa_in1k_20220119-8d939117.pth
+ Config: configs/efficientnet/efficientnet-b0_8xb32_in1k.py
+ Converted From:
+ Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckptsaug/efficientnet-b0.tar.gz
+ Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet
+ - Name: efficientnet-b0_3rdparty_8xb32-aa-advprop_in1k
+ Metadata:
+ FLOPs: 16481180
+ Parameters: 5288548
+ In Collection: EfficientNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 77.53
+ Top 5 Accuracy: 93.61
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b0_3rdparty_8xb32-aa-advprop_in1k_20220119-26434485.pth
+ Config: configs/efficientnet/efficientnet-b0_8xb32-01norm_in1k.py
+ Converted From:
+ Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/advprop/efficientnet-b0.tar.gz
+ Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet
+ - Name: efficientnet-b1_3rdparty_8xb32_in1k
+ Metadata:
+ FLOPs: 27052224
+ Parameters: 7794184
+ In Collection: EfficientNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 78.68
+ Top 5 Accuracy: 94.28
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b1_3rdparty_8xb32_in1k_20220119-002556d9.pth
+ Config: configs/efficientnet/efficientnet-b1_8xb32_in1k.py
+ Converted From:
+ Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckpts/efficientnet-b1.tar.gz
+ Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet
+ - Name: efficientnet-b1_3rdparty_8xb32-aa_in1k
+ Metadata:
+ FLOPs: 27052224
+ Parameters: 7794184
+ In Collection: EfficientNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 79.20
+ Top 5 Accuracy: 94.42
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b1_3rdparty_8xb32-aa_in1k_20220119-619d8ae3.pth
+ Config: configs/efficientnet/efficientnet-b1_8xb32_in1k.py
+ Converted From:
+ Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckptsaug/efficientnet-b1.tar.gz
+ Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet
+ - Name: efficientnet-b1_3rdparty_8xb32-aa-advprop_in1k
+ Metadata:
+ FLOPs: 27052224
+ Parameters: 7794184
+ In Collection: EfficientNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 79.52
+ Top 5 Accuracy: 94.43
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b1_3rdparty_8xb32-aa-advprop_in1k_20220119-5715267d.pth
+ Config: configs/efficientnet/efficientnet-b1_8xb32-01norm_in1k.py
+ Converted From:
+ Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/advprop/efficientnet-b1.tar.gz
+ Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet
+ - Name: efficientnet-b2_3rdparty_8xb32_in1k
+ Metadata:
+ FLOPs: 34346386
+ Parameters: 9109994
+ In Collection: EfficientNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 79.64
+ Top 5 Accuracy: 94.80
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b2_3rdparty_8xb32_in1k_20220119-ea374a30.pth
+ Config: configs/efficientnet/efficientnet-b2_8xb32_in1k.py
+ Converted From:
+ Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckpts/efficientnet-b2.tar.gz
+ Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet
+ - Name: efficientnet-b2_3rdparty_8xb32-aa_in1k
+ Metadata:
+ FLOPs: 34346386
+ Parameters: 9109994
+ In Collection: EfficientNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 80.21
+ Top 5 Accuracy: 94.96
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b2_3rdparty_8xb32-aa_in1k_20220119-dd61e80b.pth
+ Config: configs/efficientnet/efficientnet-b2_8xb32_in1k.py
+ Converted From:
+ Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckptsaug/efficientnet-b2.tar.gz
+ Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet
+ - Name: efficientnet-b2_3rdparty_8xb32-aa-advprop_in1k
+ Metadata:
+ FLOPs: 34346386
+ Parameters: 9109994
+ In Collection: EfficientNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 80.45
+ Top 5 Accuracy: 95.07
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b2_3rdparty_8xb32-aa-advprop_in1k_20220119-1655338a.pth
+ Config: configs/efficientnet/efficientnet-b2_8xb32-01norm_in1k.py
+ Converted From:
+ Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/advprop/efficientnet-b2.tar.gz
+ Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet
+ - Name: efficientnet-b3_3rdparty_8xb32_in1k
+ Metadata:
+ FLOPs: 58641904
+ Parameters: 12233232
+ In Collection: EfficientNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 81.01
+ Top 5 Accuracy: 95.34
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32_in1k_20220119-4b4d7487.pth
+ Config: configs/efficientnet/efficientnet-b3_8xb32_in1k.py
+ Converted From:
+ Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckpts/efficientnet-b3.tar.gz
+ Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet
+ - Name: efficientnet-b3_3rdparty_8xb32-aa_in1k
+ Metadata:
+ FLOPs: 58641904
+ Parameters: 12233232
+ In Collection: EfficientNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 81.58
+ Top 5 Accuracy: 95.67
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32-aa_in1k_20220119-5b4887a0.pth
+ Config: configs/efficientnet/efficientnet-b3_8xb32_in1k.py
+ Converted From:
+ Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckptsaug/efficientnet-b3.tar.gz
+ Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet
+ - Name: efficientnet-b3_3rdparty_8xb32-aa-advprop_in1k
+ Metadata:
+ FLOPs: 58641904
+ Parameters: 12233232
+ In Collection: EfficientNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 81.81
+ Top 5 Accuracy: 95.69
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32-aa-advprop_in1k_20220119-53b41118.pth
+ Config: configs/efficientnet/efficientnet-b3_8xb32-01norm_in1k.py
+ Converted From:
+ Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/advprop/efficientnet-b3.tar.gz
+ Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet
+ - Name: efficientnet-b4_3rdparty_8xb32_in1k
+ Metadata:
+ FLOPs: 121870624
+ Parameters: 19341616
+ In Collection: EfficientNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 82.57
+ Top 5 Accuracy: 96.09
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b4_3rdparty_8xb32_in1k_20220119-81fd4077.pth
+ Config: configs/efficientnet/efficientnet-b4_8xb32_in1k.py
+ Converted From:
+ Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckpts/efficientnet-b4.tar.gz
+ Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet
+ - Name: efficientnet-b4_3rdparty_8xb32-aa_in1k
+ Metadata:
+ FLOPs: 121870624
+ Parameters: 19341616
+ In Collection: EfficientNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 82.95
+ Top 5 Accuracy: 96.26
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b4_3rdparty_8xb32-aa_in1k_20220119-45b8bd2b.pth
+ Config: configs/efficientnet/efficientnet-b4_8xb32_in1k.py
+ Converted From:
+ Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckptsaug/efficientnet-b4.tar.gz
+ Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet
+ - Name: efficientnet-b4_3rdparty_8xb32-aa-advprop_in1k
+ Metadata:
+ FLOPs: 121870624
+ Parameters: 19341616
+ In Collection: EfficientNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 83.25
+ Top 5 Accuracy: 96.44
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b4_3rdparty_8xb32-aa-advprop_in1k_20220119-38c2238c.pth
+ Config: configs/efficientnet/efficientnet-b4_8xb32-01norm_in1k.py
+ Converted From:
+ Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/advprop/efficientnet-b4.tar.gz
+ Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet
+ - Name: efficientnet-b5_3rdparty_8xb32_in1k
+ Metadata:
+ FLOPs: 243879440
+ Parameters: 30389784
+ In Collection: EfficientNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 83.18
+ Top 5 Accuracy: 96.47
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b5_3rdparty_8xb32_in1k_20220119-e9814430.pth
+ Config: configs/efficientnet/efficientnet-b5_8xb32_in1k.py
+ Converted From:
+ Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckpts/efficientnet-b5.tar.gz
+ Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet
+ - Name: efficientnet-b5_3rdparty_8xb32-aa_in1k
+ Metadata:
+ FLOPs: 243879440
+ Parameters: 30389784
+ In Collection: EfficientNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 83.82
+ Top 5 Accuracy: 96.76
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b5_3rdparty_8xb32-aa_in1k_20220119-2cab8b78.pth
+ Config: configs/efficientnet/efficientnet-b5_8xb32_in1k.py
+ Converted From:
+ Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckptsaug/efficientnet-b5.tar.gz
+ Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet
+ - Name: efficientnet-b5_3rdparty_8xb32-aa-advprop_in1k
+ Metadata:
+ FLOPs: 243879440
+ Parameters: 30389784
+ In Collection: EfficientNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 84.21
+ Top 5 Accuracy: 96.98
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b5_3rdparty_8xb32-aa-advprop_in1k_20220119-f57a895a.pth
+ Config: configs/efficientnet/efficientnet-b5_8xb32-01norm_in1k.py
+ Converted From:
+ Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/advprop/efficientnet-b5.tar.gz
+ Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet
+ - Name: efficientnet-b6_3rdparty_8xb32-aa_in1k
+ Metadata:
+ FLOPs: 412002408
+ Parameters: 43040704
+ In Collection: EfficientNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 84.05
+ Top 5 Accuracy: 96.82
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b6_3rdparty_8xb32-aa_in1k_20220119-45b03310.pth
+ Config: configs/efficientnet/efficientnet-b6_8xb32_in1k.py
+ Converted From:
+ Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckptsaug/efficientnet-b6.tar.gz
+ Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet
+ - Name: efficientnet-b6_3rdparty_8xb32-aa-advprop_in1k
+ Metadata:
+ FLOPs: 412002408
+ Parameters: 43040704
+ In Collection: EfficientNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 84.74
+ Top 5 Accuracy: 97.14
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b6_3rdparty_8xb32-aa-advprop_in1k_20220119-bfe3485e.pth
+ Config: configs/efficientnet/efficientnet-b6_8xb32-01norm_in1k.py
+ Converted From:
+ Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/advprop/efficientnet-b6.tar.gz
+ Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet
+ - Name: efficientnet-b7_3rdparty_8xb32-aa_in1k
+ Metadata:
+ FLOPs: 715526512
+ Parameters: 66347960
+ In Collection: EfficientNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 84.38
+ Top 5 Accuracy: 96.88
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b7_3rdparty_8xb32-aa_in1k_20220119-bf03951c.pth
+ Config: configs/efficientnet/efficientnet-b7_8xb32_in1k.py
+ Converted From:
+ Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckptsaug/efficientnet-b7.tar.gz
+ Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet
+ - Name: efficientnet-b7_3rdparty_8xb32-aa-advprop_in1k
+ Metadata:
+ FLOPs: 715526512
+ Parameters: 66347960
+ In Collection: EfficientNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 85.14
+ Top 5 Accuracy: 97.23
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b7_3rdparty_8xb32-aa-advprop_in1k_20220119-c6dbff10.pth
+ Config: configs/efficientnet/efficientnet-b7_8xb32-01norm_in1k.py
+ Converted From:
+ Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/advprop/efficientnet-b7.tar.gz
+ Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet
+ - Name: efficientnet-b8_3rdparty_8xb32-aa-advprop_in1k
+ Metadata:
+ FLOPs: 1092755326
+ Parameters: 87413142
+ In Collection: EfficientNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 85.38
+ Top 5 Accuracy: 97.28
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b8_3rdparty_8xb32-aa-advprop_in1k_20220119-297ce1b7.pth
+ Config: configs/efficientnet/efficientnet-b8_8xb32-01norm_in1k.py
+ Converted From:
+ Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/advprop/efficientnet-b8.tar.gz
+ Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/fp16/resnet50_b32x8_fp16_dynamic_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/fp16/resnet50_b32x8_fp16_dynamic_imagenet.py
new file mode 100644
index 00000000..9075a894
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/fp16/resnet50_b32x8_fp16_dynamic_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = '../resnet/resnet50_8xb32-fp16-dynamic_in1k.py'
+
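+# This stub only redirects to the renamed config; the `_deprecation_` block
+# below lets the config loader warn users that the old path is deprecated.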
+_deprecation_ = dict(
+ expected='../resnet/resnet50_8xb32-fp16-dynamic_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/fp16/resnet50_b32x8_fp16_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/fp16/resnet50_b32x8_fp16_imagenet.py
new file mode 100644
index 00000000..a73a4097
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/fp16/resnet50_b32x8_fp16_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = '../resnet/resnet50_8xb32-fp16_in1k.py'
+
+_deprecation_ = dict(
+ expected='../resnet/resnet50_8xb32-fp16_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/hornet/README.md b/openmmlab_test/mmclassification-0.24.1/configs/hornet/README.md
new file mode 100644
index 00000000..7c1b9a9b
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/hornet/README.md
@@ -0,0 +1,51 @@
+# HorNet
+
+> [HorNet: Efficient High-Order Spatial Interactions with Recursive Gated Convolutions](https://arxiv.org/pdf/2207.14284v2.pdf)
+
+
+
+## Abstract
+
+Recent progress in vision Transformers exhibits great success in various tasks driven by the new spatial modeling mechanism based on dot-product self-attention. In this paper, we show that the key ingredients behind the vision Transformers, namely input-adaptive, long-range and high-order spatial interactions, can also be efficiently implemented with a convolution-based framework. We present the Recursive Gated Convolution (gnConv) that performs high-order spatial interactions with gated convolutions and recursive designs. The new operation is highly flexible and customizable, which is compatible with various variants of convolution and extends the two-order interactions in self-attention to arbitrary orders without introducing significant extra computation. gnConv can serve as a plug-and-play module to improve various vision Transformers and convolution-based models. Based on the operation, we construct a new family of generic vision backbones named HorNet. Extensive experiments on ImageNet classification, COCO object detection and ADE20K semantic segmentation show that HorNet outperforms Swin Transformers and ConvNeXt by a significant margin with similar overall architecture and training configurations. HorNet also shows favorable scalability to more training data and a larger model size. Apart from the effectiveness in visual encoders, we also show gnConv can be applied to task-specific decoders and consistently improve dense prediction performance with less computation. Our results demonstrate that gnConv can be a new basic module for visual modeling that effectively combines the merits of both vision Transformers and CNNs. Code is available at https://github.com/raoyongming/HorNet.
+
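+A minimal PyTorch sketch may make the recursion concrete. The order, channel layout and 7x7 depth-wise kernel below follow the paper's description but are simplified relative to the official implementation:
+
+```python
+import torch
+import torch.nn as nn
+
+class GnConv(nn.Module):
+    """Simplified recursive gated convolution (gnConv)."""
+
+    def __init__(self, dim, order=3):
+        super().__init__()
+        # Channel widths grow towards the last step, e.g. [16, 32, 64] for dim=64.
+        self.dims = [dim // 2 ** i for i in range(order)][::-1]
+        self.proj_in = nn.Conv2d(dim, 2 * dim, 1)
+        self.dwconv = nn.Conv2d(sum(self.dims), sum(self.dims), 7,
+                                padding=3, groups=sum(self.dims))
+        self.pws = nn.ModuleList([
+            nn.Conv2d(self.dims[i], self.dims[i + 1], 1)
+            for i in range(order - 1)])
+        self.proj_out = nn.Conv2d(dim, dim, 1)
+
+    def forward(self, x):
+        p, q = torch.split(self.proj_in(x), (self.dims[0], sum(self.dims)), 1)
+        gates = torch.split(self.dwconv(q), self.dims, 1)
+        x = p * gates[0]                      # 1st-order spatial interaction
+        for pw, g in zip(self.pws, gates[1:]):
+            x = pw(x) * g                     # each step raises the order by one
+        return self.proj_out(x)
+
+out = GnConv(64)(torch.randn(1, 64, 56, 56))  # -> torch.Size([1, 64, 56, 56])
+```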
+
+

+
+
+## Results and models
+
+### ImageNet-1k
+
+| Model | Pretrain | resolution | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+| :-----------: | :----------: | :--------: | :-------: | :------: | :-------: | :-------: | :--------------------------------------------------------------: | :----------------------------------------------------------------: |
+| HorNet-T\* | From scratch | 224x224 | 22.41 | 3.98 | 82.84 | 96.24 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hornet/hornet-tiny_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hornet/hornet-tiny_3rdparty_in1k_20220915-0e8eedff.pth) |
+| HorNet-T-GF\* | From scratch | 224x224 | 22.99 | 3.9 | 82.98 | 96.38 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hornet/hornet-tiny-gf_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hornet/hornet-tiny-gf_3rdparty_in1k_20220915-4c35a66b.pth) |
+| HorNet-S\* | From scratch | 224x224 | 49.53 | 8.83 | 83.79 | 96.75 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hornet/hornet-small_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hornet/hornet-small_3rdparty_in1k_20220915-5935f60f.pth) |
+| HorNet-S-GF\* | From scratch | 224x224 | 50.4 | 8.71 | 83.98 | 96.77 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hornet/hornet-small-gf_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hornet/hornet-small-gf_3rdparty_in1k_20220915-649ca492.pth) |
+| HorNet-B\* | From scratch | 224x224 | 87.26 | 15.59 | 84.24 | 96.94 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hornet/hornet-base_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hornet/hornet-base_3rdparty_in1k_20220915-a06176bb.pth) |
+| HorNet-B-GF\* | From scratch | 224x224 | 88.42 | 15.42 | 84.32 | 96.95 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hornet/hornet-base-gf_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hornet/hornet-base-gf_3rdparty_in1k_20220915-82c06fa7.pth) |
+
+\*Models with * are converted from [the official repo](https://github.com/raoyongming/HorNet). The config files of these models are only for validation. We do not guarantee their training accuracy, and we welcome you to contribute your reproduction results.
+
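+As a quick sanity check, a converted checkpoint can be loaded with the standard mmcls inference API (the checkpoint path below assumes the file has been downloaded locally):
+
+```python
+from mmcls.apis import inference_model, init_model
+
+config = 'configs/hornet/hornet-tiny_8xb128_in1k.py'
+checkpoint = 'hornet-tiny_3rdparty_in1k_20220915-0e8eedff.pth'  # local download
+model = init_model(config, checkpoint, device='cpu')
+result = inference_model(model, 'demo/demo.JPEG')  # demo image shipped with mmcls
+print(result['pred_class'], result['pred_score'])
+```
+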
+### Pre-trained Models
+
+The pre-trained models on ImageNet-21k are used to fine-tune on the downstream tasks.
+
+| Model | Pretrain | resolution | Params(M) | Flops(G) | Download |
+| :--------------: | :----------: | :--------: | :-------: | :------: | :------------------------------------------------------------------------------------------------------------------------: |
+| HorNet-L\* | ImageNet-21k | 224x224 | 194.54 | 34.83 | [model](https://download.openmmlab.com/mmclassification/v0/hornet/hornet-large_3rdparty_in21k_20220909-9ccef421.pth) |
+| HorNet-L-GF\* | ImageNet-21k | 224x224 | 196.29 | 34.58 | [model](https://download.openmmlab.com/mmclassification/v0/hornet/hornet-large-gf_3rdparty_in21k_20220909-3aea3b61.pth) |
+| HorNet-L-GF384\* | ImageNet-21k | 384x384 | 201.23 | 101.63 | [model](https://download.openmmlab.com/mmclassification/v0/hornet/hornet-large-gf384_3rdparty_in21k_20220909-80894290.pth) |
+
+\*Models with * are converted from [the official repo](https://github.com/raoyongming/HorNet).
+
+## Citation
+
+```
+@article{rao2022hornet,
+ title={HorNet: Efficient High-Order Spatial Interactions with Recursive Gated Convolutions},
+ author={Rao, Yongming and Zhao, Wenliang and Tang, Yansong and Zhou, Jie and Lim, Ser-Nam and Lu, Jiwen},
+ journal={arXiv preprint arXiv:2207.14284},
+ year={2022}
+}
+```
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/hornet/hornet-base-gf_8xb64_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/hornet/hornet-base-gf_8xb64_in1k.py
new file mode 100644
index 00000000..6c29de66
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/hornet/hornet-base-gf_8xb64_in1k.py
@@ -0,0 +1,13 @@
+_base_ = [
+ '../_base_/models/hornet/hornet-base-gf.py',
+ '../_base_/datasets/imagenet_bs64_swin_224.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py',
+]
+
+data = dict(samples_per_gpu=64)
+
+optimizer = dict(lr=4e-3)
+optimizer_config = dict(grad_clip=dict(max_norm=1.0), _delete_=True)
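+# `_delete_=True` replaces the optimizer_config inherited from the base schedule
+# instead of merging with it, so only the gradient clipping set here applies.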
+
+custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')]
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/hornet/hornet-base_8xb64_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/hornet/hornet-base_8xb64_in1k.py
new file mode 100644
index 00000000..969d8b95
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/hornet/hornet-base_8xb64_in1k.py
@@ -0,0 +1,13 @@
+_base_ = [
+ '../_base_/models/hornet/hornet-base.py',
+ '../_base_/datasets/imagenet_bs64_swin_224.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py',
+]
+
+data = dict(samples_per_gpu=64)
+
+optimizer = dict(lr=4e-3)
+optimizer_config = dict(grad_clip=dict(max_norm=5.0), _delete_=True)
+
+custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')]
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/hornet/hornet-small-gf_8xb64_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/hornet/hornet-small-gf_8xb64_in1k.py
new file mode 100644
index 00000000..deb570eb
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/hornet/hornet-small-gf_8xb64_in1k.py
@@ -0,0 +1,13 @@
+_base_ = [
+ '../_base_/models/hornet/hornet-small-gf.py',
+ '../_base_/datasets/imagenet_bs64_swin_224.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py',
+]
+
+data = dict(samples_per_gpu=64)
+
+optimizer = dict(lr=4e-3)
+optimizer_config = dict(grad_clip=dict(max_norm=1.0), _delete_=True)
+
+custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')]
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/hornet/hornet-small_8xb64_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/hornet/hornet-small_8xb64_in1k.py
new file mode 100644
index 00000000..c07fa60d
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/hornet/hornet-small_8xb64_in1k.py
@@ -0,0 +1,13 @@
+_base_ = [
+ '../_base_/models/hornet/hornet-small.py',
+ '../_base_/datasets/imagenet_bs64_swin_224.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py',
+]
+
+data = dict(samples_per_gpu=64)
+
+optimizer = dict(lr=4e-3)
+optimizer_config = dict(grad_clip=dict(max_norm=5.0), _delete_=True)
+
+custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')]
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/hornet/hornet-tiny-gf_8xb128_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/hornet/hornet-tiny-gf_8xb128_in1k.py
new file mode 100644
index 00000000..3a1d1a7a
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/hornet/hornet-tiny-gf_8xb128_in1k.py
@@ -0,0 +1,13 @@
+_base_ = [
+ '../_base_/models/hornet/hornet-tiny-gf.py',
+ '../_base_/datasets/imagenet_bs64_swin_224.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py',
+]
+
+data = dict(samples_per_gpu=128)
+
+optimizer = dict(lr=4e-3)
+optimizer_config = dict(grad_clip=dict(max_norm=1.0), _delete_=True)
+
+custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')]
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/hornet/hornet-tiny_8xb128_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/hornet/hornet-tiny_8xb128_in1k.py
new file mode 100644
index 00000000..69a7cdf0
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/hornet/hornet-tiny_8xb128_in1k.py
@@ -0,0 +1,13 @@
+_base_ = [
+ '../_base_/models/hornet/hornet-tiny.py',
+ '../_base_/datasets/imagenet_bs64_swin_224.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py',
+]
+
+data = dict(samples_per_gpu=128)
+
+optimizer = dict(lr=4e-3)
+optimizer_config = dict(grad_clip=dict(max_norm=100.0), _delete_=True)
+
+custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')]
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/hornet/metafile.yml b/openmmlab_test/mmclassification-0.24.1/configs/hornet/metafile.yml
new file mode 100644
index 00000000..71207722
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/hornet/metafile.yml
@@ -0,0 +1,97 @@
+Collections:
+ - Name: HorNet
+ Metadata:
+ Training Data: ImageNet-1k
+ Training Techniques:
+ - AdamW
+ - Weight Decay
+ Architecture:
+ - HorNet
+ - gnConv
+ Paper:
+ URL: https://arxiv.org/pdf/2207.14284v2.pdf
+ Title: "HorNet: Efficient High-Order Spatial Interactions with Recursive Gated Convolutions"
+ README: configs/hornet/README.md
+ Code:
+ Version: v0.24.0
+ URL: https://github.com/open-mmlab/mmclassification/blob/v0.24.0/mmcls/models/backbones/hornet.py
+
+Models:
+ - Name: hornet-tiny_3rdparty_in1k
+ Metadata:
+ FLOPs: 3980000000 # 3.98G
+ Parameters: 22410000 # 22.41M
+ In Collection: HorNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 82.84
+ Top 5 Accuracy: 96.24
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/hornet/hornet-tiny_3rdparty_in1k_20220915-0e8eedff.pth
+ Config: configs/hornet/hornet-tiny_8xb128_in1k.py
+ - Name: hornet-tiny-gf_3rdparty_in1k
+ Metadata:
+ FLOPs: 3900000000 # 3.9G
+ Parameters: 22990000 # 22.99M
+ In Collection: HorNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 82.98
+ Top 5 Accuracy: 96.38
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/hornet/hornet-tiny-gf_3rdparty_in1k_20220915-4c35a66b.pth
+ Config: configs/hornet/hornet-tiny-gf_8xb128_in1k.py
+ - Name: hornet-small_3rdparty_in1k
+ Metadata:
+ FLOPs: 8830000000 # 8.83G
+ Parameters: 49530000 # 49.53M
+ In Collection: HorNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 83.79
+ Top 5 Accuracy: 96.75
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/hornet/hornet-small_3rdparty_in1k_20220915-5935f60f.pth
+ Config: configs/hornet/hornet-small_8xb64_in1k.py
+ - Name: hornet-small-gf_3rdparty_in1k
+ Metadata:
+ FLOPs: 8710000000 # 8.71G
+ Parameters: 50400000 # 50.4M
+ In Collection: HorNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 83.98
+ Top 5 Accuracy: 96.77
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/hornet/hornet-small-gf_3rdparty_in1k_20220915-649ca492.pth
+ Config: configs/hornet/hornet-small-gf_8xb64_in1k.py
+ - Name: hornet-base_3rdparty_in1k
+ Metadata:
+ FLOPs: 15590000000 # 15.59G
+ Parameters: 87260000 # 87.26M
+ In Collection: HorNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 84.24
+ Top 5 Accuracy: 96.94
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/hornet/hornet-base_3rdparty_in1k_20220915-a06176bb.pth
+ Config: configs/hornet/hornet-base_8xb64_in1k.py
+ - Name: hornet-base-gf_3rdparty_in1k
+ Metadata:
+ FLOPs: 15420000000 # 15.42G
+ Parameters: 88420000 # 88.42M
+ In Collection: HorNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 84.32
+ Top 5 Accuracy: 96.95
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/hornet/hornet-base-gf_3rdparty_in1k_20220915-82c06fa7.pth
+ Config: configs/hornet/hornet-base-gf_8xb64_in1k.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/hrnet/README.md b/openmmlab_test/mmclassification-0.24.1/configs/hrnet/README.md
new file mode 100644
index 00000000..0a30ccd1
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/hrnet/README.md
@@ -0,0 +1,44 @@
+# HRNet
+
+> [Deep High-Resolution Representation Learning for Visual Recognition](https://arxiv.org/abs/1908.07919v2)
+
+
+
+## Abstract
+
+High-resolution representations are essential for position-sensitive vision problems, such as human pose estimation, semantic segmentation, and object detection. Existing state-of-the-art frameworks first encode the input image as a low-resolution representation through a subnetwork that is formed by connecting high-to-low resolution convolutions *in series* (e.g., ResNet, VGGNet), and then recover the high-resolution representation from the encoded low-resolution representation. Instead, our proposed network, named as High-Resolution Network (HRNet), maintains high-resolution representations through the whole process. There are two key characteristics: (i) Connect the high-to-low resolution convolution streams *in parallel*; (ii) Repeatedly exchange the information across resolutions. The benefit is that the resulting representation is semantically richer and spatially more precise. We show the superiority of the proposed HRNet in a wide range of applications, including human pose estimation, semantic segmentation, and object detection, suggesting that the HRNet is a stronger backbone for computer vision problems.
+
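+The core mechanism, parallel multi-resolution streams that repeatedly exchange information, fits in a short sketch. Below is an illustrative fusion step over two resolution branches in PyTorch; the real HRNet uses up to four branches and stacks many such exchanges, which are omitted here:
+
+```python
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+class TwoBranchExchange(nn.Module):
+    """Toy exchange unit between a high- and a low-resolution stream."""
+
+    def __init__(self, c_high=32, c_low=64):
+        super().__init__()
+        self.down = nn.Conv2d(c_high, c_low, 3, stride=2, padding=1)  # high -> low
+        self.up = nn.Conv2d(c_low, c_high, 1)                         # match channels
+
+    def forward(self, x_high, x_low):
+        # Each branch keeps its own resolution and absorbs the resized other branch.
+        y_high = x_high + F.interpolate(self.up(x_low), size=x_high.shape[2:],
+                                        mode='bilinear', align_corners=False)
+        y_low = x_low + self.down(x_high)
+        return y_high, y_low
+
+h, l = TwoBranchExchange()(torch.randn(1, 32, 56, 56), torch.randn(1, 64, 28, 28))
+# h: [1, 32, 56, 56], l: [1, 64, 28, 28]; both streams now carry fused information
+```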
+
+

+
+
+## Results and models
+
+### ImageNet-1k
+
+| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+| :----------------: | :-------: | :------: | :-------: | :-------: | :----------------------------------------------------------------------: | :-------------------------------------------------------------------------: |
+| HRNet-W18\* | 21.30 | 4.33 | 76.75 | 93.44 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hrnet/hrnet-w18_4xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w18_3rdparty_8xb32_in1k_20220120-0c10b180.pth) |
+| HRNet-W30\* | 37.71 | 8.17 | 78.19 | 94.22 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hrnet/hrnet-w30_4xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w30_3rdparty_8xb32_in1k_20220120-8aa3832f.pth) |
+| HRNet-W32\* | 41.23 | 8.99 | 78.44 | 94.19 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hrnet/hrnet-w32_4xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w32_3rdparty_8xb32_in1k_20220120-c394f1ab.pth) |
+| HRNet-W40\* | 57.55 | 12.77 | 78.94 | 94.47 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hrnet/hrnet-w40_4xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w40_3rdparty_8xb32_in1k_20220120-9a2dbfc5.pth) |
+| HRNet-W44\* | 67.06 | 14.96 | 78.88 | 94.37 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hrnet/hrnet-w44_4xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w44_3rdparty_8xb32_in1k_20220120-35d07f73.pth) |
+| HRNet-W48\* | 77.47 | 17.36 | 79.32 | 94.52 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hrnet/hrnet-w48_4xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w48_3rdparty_8xb32_in1k_20220120-e555ef50.pth) |
+| HRNet-W64\* | 128.06 | 29.00 | 79.46 | 94.65 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hrnet/hrnet-w64_4xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w64_3rdparty_8xb32_in1k_20220120-19126642.pth) |
+| HRNet-W18 (ssld)\* | 21.30 | 4.33 | 81.06 | 95.70 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hrnet/hrnet-w18_4xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w18_3rdparty_8xb32-ssld_in1k_20220120-455f69ea.pth) |
+| HRNet-W48 (ssld)\* | 77.47 | 17.36 | 83.63 | 96.79 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hrnet/hrnet-w48_4xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w48_3rdparty_8xb32-ssld_in1k_20220120-d0459c38.pth) |
+
+*Models with * are converted from the [official repo](https://github.com/HRNet/HRNet-Image-Classification). The config files of these models are only for inference. We do not guarantee their training accuracy, and we welcome you to contribute your reproduction results.*
+
+## Citation
+
+```
+@article{WangSCJDZLMTWLX19,
+ title={Deep High-Resolution Representation Learning for Visual Recognition},
+ author={Jingdong Wang and Ke Sun and Tianheng Cheng and
+ Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and
+ Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao},
+ journal={TPAMI},
+ year={2019}
+}
+```
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/hrnet/hrnet-w18_4xb32_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/hrnet/hrnet-w18_4xb32_in1k.py
new file mode 100644
index 00000000..a84fe67f
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/hrnet/hrnet-w18_4xb32_in1k.py
@@ -0,0 +1,6 @@
+_base_ = [
+ '../_base_/models/hrnet/hrnet-w18.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256_coslr.py',
+ '../_base_/default_runtime.py'
+]
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/hrnet/hrnet-w30_4xb32_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/hrnet/hrnet-w30_4xb32_in1k.py
new file mode 100644
index 00000000..d2a9c0dd
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/hrnet/hrnet-w30_4xb32_in1k.py
@@ -0,0 +1,6 @@
+_base_ = [
+ '../_base_/models/hrnet/hrnet-w30.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256_coslr.py',
+ '../_base_/default_runtime.py'
+]
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/hrnet/hrnet-w32_4xb32_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/hrnet/hrnet-w32_4xb32_in1k.py
new file mode 100644
index 00000000..91380a96
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/hrnet/hrnet-w32_4xb32_in1k.py
@@ -0,0 +1,6 @@
+_base_ = [
+ '../_base_/models/hrnet/hrnet-w32.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256_coslr.py',
+ '../_base_/default_runtime.py'
+]
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/hrnet/hrnet-w40_4xb32_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/hrnet/hrnet-w40_4xb32_in1k.py
new file mode 100644
index 00000000..5d35cecd
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/hrnet/hrnet-w40_4xb32_in1k.py
@@ -0,0 +1,6 @@
+_base_ = [
+ '../_base_/models/hrnet/hrnet-w40.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256_coslr.py',
+ '../_base_/default_runtime.py'
+]
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/hrnet/hrnet-w44_4xb32_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/hrnet/hrnet-w44_4xb32_in1k.py
new file mode 100644
index 00000000..ce6bb41a
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/hrnet/hrnet-w44_4xb32_in1k.py
@@ -0,0 +1,6 @@
+_base_ = [
+ '../_base_/models/hrnet/hrnet-w44.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256_coslr.py',
+ '../_base_/default_runtime.py'
+]
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/hrnet/hrnet-w48_4xb32_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/hrnet/hrnet-w48_4xb32_in1k.py
new file mode 100644
index 00000000..6943892e
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/hrnet/hrnet-w48_4xb32_in1k.py
@@ -0,0 +1,6 @@
+_base_ = [
+ '../_base_/models/hrnet/hrnet-w48.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256_coslr.py',
+ '../_base_/default_runtime.py'
+]
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/hrnet/hrnet-w64_4xb32_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/hrnet/hrnet-w64_4xb32_in1k.py
new file mode 100644
index 00000000..0009bc67
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/hrnet/hrnet-w64_4xb32_in1k.py
@@ -0,0 +1,6 @@
+_base_ = [
+ '../_base_/models/hrnet/hrnet-w64.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256_coslr.py',
+ '../_base_/default_runtime.py'
+]
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/hrnet/metafile.yml b/openmmlab_test/mmclassification-0.24.1/configs/hrnet/metafile.yml
new file mode 100644
index 00000000..64fe1422
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/hrnet/metafile.yml
@@ -0,0 +1,162 @@
+Collections:
+ - Name: HRNet
+ Metadata:
+ Training Data: ImageNet-1k
+ Architecture:
+ - Batch Normalization
+ - Convolution
+ - ReLU
+ - Residual Connection
+ Paper:
+ URL: https://arxiv.org/abs/1908.07919v2
+ Title: "Deep High-Resolution Representation Learning for Visual Recognition"
+ README: configs/hrnet/README.md
+ Code:
+ URL: https://github.com/open-mmlab/mmclassification/blob/v0.20.1/mmcls/models/backbones/hrnet.py
+ Version: v0.20.1
+
+Models:
+ - Name: hrnet-w18_3rdparty_8xb32_in1k
+ Metadata:
+ FLOPs: 4330397932
+ Parameters: 21295164
+ In Collection: HRNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 76.75
+ Top 5 Accuracy: 93.44
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w18_3rdparty_8xb32_in1k_20220120-0c10b180.pth
+ Config: configs/hrnet/hrnet-w18_4xb32_in1k.py
+ Converted From:
+ Weights: https://1drv.ms/u/s!Aus8VCZ_C_33cMkPimlmClRvmpw
+ Code: https://github.com/HRNet/HRNet-Image-Classification
+ - Name: hrnet-w30_3rdparty_8xb32_in1k
+ Metadata:
+ FLOPs: 8168305684
+ Parameters: 37708380
+ In Collection: HRNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 78.19
+ Top 5 Accuracy: 94.22
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w30_3rdparty_8xb32_in1k_20220120-8aa3832f.pth
+ Config: configs/hrnet/hrnet-w30_4xb32_in1k.py
+ Converted From:
+ Weights: https://1drv.ms/u/s!Aus8VCZ_C_33cQoACCEfrzcSaVI
+ Code: https://github.com/HRNet/HRNet-Image-Classification
+ - Name: hrnet-w32_3rdparty_8xb32_in1k
+ Metadata:
+ FLOPs: 8986267584
+ Parameters: 41228840
+ In Collection: HRNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 78.44
+ Top 5 Accuracy: 94.19
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w32_3rdparty_8xb32_in1k_20220120-c394f1ab.pth
+ Config: configs/hrnet/hrnet-w32_4xb32_in1k.py
+ Converted From:
+ Weights: https://1drv.ms/u/s!Aus8VCZ_C_33dYBMemi9xOUFR0w
+ Code: https://github.com/HRNet/HRNet-Image-Classification
+ - Name: hrnet-w40_3rdparty_8xb32_in1k
+ Metadata:
+ FLOPs: 12767574064
+ Parameters: 57553320
+ In Collection: HRNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 78.94
+ Top 5 Accuracy: 94.47
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w40_3rdparty_8xb32_in1k_20220120-9a2dbfc5.pth
+ Config: configs/hrnet/hrnet-w40_4xb32_in1k.py
+ Converted From:
+ Weights: https://1drv.ms/u/s!Aus8VCZ_C_33ck0gvo5jfoWBOPo
+ Code: https://github.com/HRNet/HRNet-Image-Classification
+ - Name: hrnet-w44_3rdparty_8xb32_in1k
+ Metadata:
+ FLOPs: 14963902632
+ Parameters: 67061144
+ In Collection: HRNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 78.88
+ Top 5 Accuracy: 94.37
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w44_3rdparty_8xb32_in1k_20220120-35d07f73.pth
+ Config: configs/hrnet/hrnet-w44_4xb32_in1k.py
+ Converted From:
+ Weights: https://1drv.ms/u/s!Aus8VCZ_C_33czZQ0woUb980gRs
+ Code: https://github.com/HRNet/HRNet-Image-Classification
+ - Name: hrnet-w48_3rdparty_8xb32_in1k
+ Metadata:
+ FLOPs: 17364014752
+ Parameters: 77466024
+ In Collection: HRNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 79.32
+ Top 5 Accuracy: 94.52
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w48_3rdparty_8xb32_in1k_20220120-e555ef50.pth
+ Config: configs/hrnet/hrnet-w48_4xb32_in1k.py
+ Converted From:
+ Weights: https://1drv.ms/u/s!Aus8VCZ_C_33dKvqI6pBZlifgJk
+ Code: https://github.com/HRNet/HRNet-Image-Classification
+ - Name: hrnet-w64_3rdparty_8xb32_in1k
+ Metadata:
+ FLOPs: 29002298752
+ Parameters: 128056104
+ In Collection: HRNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 79.46
+ Top 5 Accuracy: 94.65
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w64_3rdparty_8xb32_in1k_20220120-19126642.pth
+ Config: configs/hrnet/hrnet-w64_4xb32_in1k.py
+ Converted From:
+ Weights: https://1drv.ms/u/s!Aus8VCZ_C_33gQbJsUPTIj3rQu99
+ Code: https://github.com/HRNet/HRNet-Image-Classification
+ - Name: hrnet-w18_3rdparty_8xb32-ssld_in1k
+ Metadata:
+ FLOPs: 4330397932
+ Parameters: 21295164
+ In Collection: HRNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 81.06
+ Top 5 Accuracy: 95.7
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w18_3rdparty_8xb32-ssld_in1k_20220120-455f69ea.pth
+ Config: configs/hrnet/hrnet-w18_4xb32_in1k.py
+ Converted From:
+ Weights: https://github.com/HRNet/HRNet-Image-Classification/releases/download/PretrainedWeights/HRNet_W18_C_ssld_pretrained.pth
+ Code: https://github.com/HRNet/HRNet-Image-Classification
+ - Name: hrnet-w48_3rdparty_8xb32-ssld_in1k
+ Metadata:
+ FLOPs: 17364014752
+ Parameters: 77466024
+ In Collection: HRNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 83.63
+ Top 5 Accuracy: 96.79
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w48_3rdparty_8xb32-ssld_in1k_20220120-d0459c38.pth
+ Config: configs/hrnet/hrnet-w48_4xb32_in1k.py
+ Converted From:
+ Weights: https://github.com/HRNet/HRNet-Image-Classification/releases/download/PretrainedWeights/HRNet_W48_C_ssld_pretrained.pth
+ Code: https://github.com/HRNet/HRNet-Image-Classification
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/lenet/README.md b/openmmlab_test/mmclassification-0.24.1/configs/lenet/README.md
new file mode 100644
index 00000000..2cd68eac
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/lenet/README.md
@@ -0,0 +1,28 @@
+# LeNet
+
+> [Backpropagation Applied to Handwritten Zip Code Recognition](https://ieeexplore.ieee.org/document/6795724)
+
+
+
+## Abstract
+
+The ability of learning networks to generalize can be greatly enhanced by providing constraints from the task domain. This paper demonstrates how such constraints can be integrated into a backpropagation network through the architecture of the network. This approach has been successfully applied to the recognition of handwritten zip code digits provided by the U.S. Postal Service. A single network learns the entire recognition operation, going from the normalized image of the character to the final classification.
+
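+For reference, the whole network is tiny by modern standards. Here is a PyTorch sketch of a LeNet-style convnet for 32x32 single-channel digit images; the layer sizes follow the classic LeNet-5 layout, which may differ in detail from the backbone in this repo:
+
+```python
+import torch
+import torch.nn as nn
+
+lenet = nn.Sequential(                      # input: 1x32x32
+    nn.Conv2d(1, 6, 5), nn.Tanh(),          # -> 6x28x28
+    nn.AvgPool2d(2),                        # -> 6x14x14
+    nn.Conv2d(6, 16, 5), nn.Tanh(),         # -> 16x10x10
+    nn.AvgPool2d(2),                        # -> 16x5x5
+    nn.Flatten(),
+    nn.Linear(16 * 5 * 5, 120), nn.Tanh(),
+    nn.Linear(120, 84), nn.Tanh(),
+    nn.Linear(84, 10),                      # 10 digit classes
+)
+logits = lenet(torch.randn(1, 1, 32, 32))   # -> torch.Size([1, 10])
+```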
+
+

+
+
+## Citation
+
+```
+@ARTICLE{6795724,
+ author={Y. {LeCun} and B. {Boser} and J. S. {Denker} and D. {Henderson} and R. E. {Howard} and W. {Hubbard} and L. D. {Jackel}},
+ journal={Neural Computation},
+ title={Backpropagation Applied to Handwritten Zip Code Recognition},
+ year={1989},
+ volume={1},
+ number={4},
+ pages={541-551},
+ doi={10.1162/neco.1989.1.4.541}
+}
+```
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/lenet/lenet5_mnist.py b/openmmlab_test/mmclassification-0.24.1/configs/lenet/lenet5_mnist.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/lenet/lenet5_mnist.py
rename to openmmlab_test/mmclassification-0.24.1/configs/lenet/lenet5_mnist.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/mlp_mixer/README.md b/openmmlab_test/mmclassification-0.24.1/configs/mlp_mixer/README.md
new file mode 100644
index 00000000..5ec98871
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/mlp_mixer/README.md
@@ -0,0 +1,37 @@
+# MLP-Mixer
+
+> [MLP-Mixer: An all-MLP Architecture for Vision](https://arxiv.org/abs/2105.01601)
+
+
+
+## Abstract
+
+Convolutional Neural Networks (CNNs) are the go-to model for computer vision. Recently, attention-based networks, such as the Vision Transformer, have also become popular. In this paper we show that while convolutions and attention are both sufficient for good performance, neither of them are necessary. We present MLP-Mixer, an architecture based exclusively on multi-layer perceptrons (MLPs). MLP-Mixer contains two types of layers: one with MLPs applied independently to image patches (i.e. "mixing" the per-location features), and one with MLPs applied across patches (i.e. "mixing" spatial information). When trained on large datasets, or with modern regularization schemes, MLP-Mixer attains competitive scores on image classification benchmarks, with pre-training and inference cost comparable to state-of-the-art models. We hope that these results spark further research beyond the realms of well established CNNs and Transformers.
+
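+The two mixing steps translate directly into code. A minimal PyTorch sketch of one Mixer block follows; the hidden sizes are illustrative (Mixer-B/16 values) and the pre-norm residual placement follows the paper:
+
+```python
+import torch
+import torch.nn as nn
+
+def mlp(dim, hidden):
+    return nn.Sequential(nn.Linear(dim, hidden), nn.GELU(), nn.Linear(hidden, dim))
+
+class MixerBlock(nn.Module):
+    def __init__(self, num_patches, channels, token_hidden=384, channel_hidden=3072):
+        super().__init__()
+        self.norm1 = nn.LayerNorm(channels)
+        self.token_mlp = mlp(num_patches, token_hidden)    # mixes across patches
+        self.norm2 = nn.LayerNorm(channels)
+        self.channel_mlp = mlp(channels, channel_hidden)   # mixes per-patch features
+
+    def forward(self, x):                                  # x: (B, patches, channels)
+        x = x + self.token_mlp(self.norm1(x).transpose(1, 2)).transpose(1, 2)
+        return x + self.channel_mlp(self.norm2(x))
+
+y = MixerBlock(196, 768)(torch.randn(2, 196, 768))  # 14x14 patches of a 224 image
+```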
+
+

+
+
+## Results and models
+
+### ImageNet-1k
+
+| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+| :----------: | :-------: | :------: | :-------: | :-------: | :-------------------------------------------------------------------------: | :----------------------------------------------------------------------------: |
+| Mixer-B/16\* | 59.88 | 12.61 | 76.68 | 92.25 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/mlp_mixer/mlp-mixer-base-p16_64xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mlp-mixer/mixer-base-p16_3rdparty_64xb64_in1k_20211124-1377e3e0.pth) |
+| Mixer-L/16\* | 208.2 | 44.57 | 72.34 | 88.02 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/mlp_mixer/mlp-mixer-large-p16_64xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mlp-mixer/mixer-large-p16_3rdparty_64xb64_in1k_20211124-5a2519d2.pth) |
+
+*Models with * are converted from [timm](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/mlp_mixer.py). The config files of these models are only for validation. We do not guarantee their training accuracy, and we welcome you to contribute your reproduction results.*
+
+## Citation
+
+```
+@misc{tolstikhin2021mlpmixer,
+ title={MLP-Mixer: An all-MLP Architecture for Vision},
+ author={Ilya Tolstikhin and Neil Houlsby and Alexander Kolesnikov and Lucas Beyer and Xiaohua Zhai and Thomas Unterthiner and Jessica Yung and Andreas Steiner and Daniel Keysers and Jakob Uszkoreit and Mario Lucic and Alexey Dosovitskiy},
+ year={2021},
+ eprint={2105.01601},
+ archivePrefix={arXiv},
+ primaryClass={cs.CV}
+}
+```
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/mlp_mixer/metafile.yml b/openmmlab_test/mmclassification-0.24.1/configs/mlp_mixer/metafile.yml
new file mode 100644
index 00000000..e8efa085
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/mlp_mixer/metafile.yml
@@ -0,0 +1,50 @@
+Collections:
+ - Name: MLP-Mixer
+ Metadata:
+ Training Data: ImageNet-1k
+ Architecture:
+ - MLP
+ - Layer Normalization
+ - Dropout
+ Paper:
+ URL: https://arxiv.org/abs/2105.01601
+ Title: "MLP-Mixer: An all-MLP Architecture for Vision"
+ README: configs/mlp_mixer/README.md
+ Code:
+ URL: https://github.com/open-mmlab/mmclassification/blob/v0.18.0/mmcls/models/backbones/mlp_mixer.py
+ Version: v0.18.0
+
+Models:
+ - Name: mlp-mixer-base-p16_3rdparty_64xb64_in1k
+ In Collection: MLP-Mixer
+ Config: configs/mlp_mixer/mlp-mixer-base-p16_64xb64_in1k.py
+ Metadata:
+ FLOPs: 12610000000 # 12.61 G
+ Parameters: 59880000 # 59.88 M
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 76.68
+ Top 5 Accuracy: 92.25
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/mlp-mixer/mixer-base-p16_3rdparty_64xb64_in1k_20211124-1377e3e0.pth
+ Converted From:
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_b16_224-76587d61.pth
+ Code: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/mlp_mixer.py#L70
+
+ - Name: mlp-mixer-large-p16_3rdparty_64xb64_in1k
+ In Collection: MLP-Mixer
+ Config: configs/mlp_mixer/mlp-mixer-large-p16_64xb64_in1k.py
+ Metadata:
+ FLOPs: 44570000000 # 44.57 G
+ Parameters: 208200000 # 208.2 M
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 72.34
+ Top 5 Accuracy: 88.02
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/mlp-mixer/mixer-large-p16_3rdparty_64xb64_in1k_20211124-5a2519d2.pth
+ Converted From:
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_b16_224_in21k-617b3de2.pth
+ Code: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/mlp_mixer.py#L73
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/mlp_mixer/mlp-mixer-base-p16_64xb64_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/mlp_mixer/mlp-mixer-base-p16_64xb64_in1k.py
new file mode 100644
index 00000000..e35dae55
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/mlp_mixer/mlp-mixer-base-p16_64xb64_in1k.py
@@ -0,0 +1,6 @@
+_base_ = [
+ '../_base_/models/mlp_mixer_base_patch16.py',
+ '../_base_/datasets/imagenet_bs64_mixer_224.py',
+ '../_base_/schedules/imagenet_bs4096_AdamW.py',
+ '../_base_/default_runtime.py',
+]
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/mlp_mixer/mlp-mixer-large-p16_64xb64_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/mlp_mixer/mlp-mixer-large-p16_64xb64_in1k.py
new file mode 100644
index 00000000..459563c8
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/mlp_mixer/mlp-mixer-large-p16_64xb64_in1k.py
@@ -0,0 +1,6 @@
+_base_ = [
+ '../_base_/models/mlp_mixer_large_patch16.py',
+ '../_base_/datasets/imagenet_bs64_mixer_224.py',
+ '../_base_/schedules/imagenet_bs4096_AdamW.py',
+ '../_base_/default_runtime.py',
+]
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v2/README.md b/openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v2/README.md
new file mode 100644
index 00000000..675c8dd4
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v2/README.md
@@ -0,0 +1,38 @@
+# MobileNet V2
+
+> [MobileNetV2: Inverted Residuals and Linear Bottlenecks](https://arxiv.org/abs/1801.04381)
+
+
+
+## Abstract
+
+In this paper we describe a new mobile architecture, MobileNetV2, that improves the state of the art performance of mobile models on multiple tasks and benchmarks as well as across a spectrum of different model sizes. We also describe efficient ways of applying these mobile models to object detection in a novel framework we call SSDLite. Additionally, we demonstrate how to build mobile semantic segmentation models through a reduced form of DeepLabv3 which we call Mobile DeepLabv3.
+
+The MobileNetV2 architecture is based on an inverted residual structure where the input and output of the residual block are thin bottleneck layers, opposite to traditional residual models which use expanded representations in the input. MobileNetV2 uses lightweight depthwise convolutions to filter features in the intermediate expansion layer. Additionally, we find that it is important to remove non-linearities in the narrow layers in order to maintain representational power. We demonstrate that this improves performance and provide an intuition that led to this design. Finally, our approach allows decoupling of the input/output domains from the expressiveness of the transformation, which provides a convenient framework for further analysis. We measure our performance on ImageNet classification, COCO object detection, and VOC image segmentation. We evaluate the trade-offs between accuracy and the number of operations measured by multiply-adds (MAdd), as well as the number of parameters.
+
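+A toy PyTorch sketch of the inverted residual described above (illustrative
+only, not the mmcls implementation): expand with a 1x1 convolution, filter
+with a 3x3 depthwise convolution, then project back through a linear (no
+activation) 1x1 bottleneck.
+
+```python
+import torch
+import torch.nn as nn
+
+class InvertedResidual(nn.Module):
+    def __init__(self, channels, expand_ratio=6, stride=1):
+        super().__init__()
+        hidden = channels * expand_ratio
+        self.use_res = stride == 1
+        self.block = nn.Sequential(
+            nn.Conv2d(channels, hidden, 1, bias=False),  # 1x1 expansion
+            nn.BatchNorm2d(hidden), nn.ReLU6(inplace=True),
+            nn.Conv2d(hidden, hidden, 3, stride, 1,      # 3x3 depthwise
+                      groups=hidden, bias=False),
+            nn.BatchNorm2d(hidden), nn.ReLU6(inplace=True),
+            nn.Conv2d(hidden, channels, 1, bias=False),  # linear bottleneck
+            nn.BatchNorm2d(channels),
+        )
+
+    def forward(self, x):
+        out = self.block(x)
+        return x + out if self.use_res else out
+
+x = torch.randn(1, 32, 56, 56)
+print(InvertedResidual(32)(x).shape)  # torch.Size([1, 32, 56, 56])
+```
+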
+
+
+
+
+
+## Results and models
+
+### ImageNet-1k
+
+| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+| :----------: | :-------: | :------: | :-------: | :-------: | :-------------------------------------------------------------------------: | :----------------------------------------------------------------------------: |
+| MobileNet V2 | 3.5 | 0.319 | 71.86 | 90.42 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/mobilenet_v2/mobilenet-v2_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.log.json) |
+
+## Citation
+
+```bibtex
+@INPROCEEDINGS{8578572,
+ author={M. {Sandler} and A. {Howard} and M. {Zhu} and A. {Zhmoginov} and L. {Chen}},
+ booktitle={2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition},
+ title={MobileNetV2: Inverted Residuals and Linear Bottlenecks},
+ year={2018},
+ volume={},
+ number={},
+ pages={4510-4520},
+ doi={10.1109/CVPR.2018.00474}
+}
+```
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v2/metafile.yml b/openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v2/metafile.yml
new file mode 100644
index 00000000..e16557fb
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v2/metafile.yml
@@ -0,0 +1,34 @@
+Collections:
+ - Name: MobileNet V2
+ Metadata:
+ Training Data: ImageNet-1k
+ Training Techniques:
+ - SGD with Momentum
+ - Weight Decay
+ Training Resources: 8x V100 GPUs
+ Epochs: 300
+ Batch Size: 256
+ Architecture:
+ - MobileNet V2
+ Paper:
+ URL: https://arxiv.org/abs/1801.04381
+ Title: "MobileNetV2: Inverted Residuals and Linear Bottlenecks"
+ README: configs/mobilenet_v2/README.md
+ Code:
+ URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/mobilenet_v2.py#L101
+ Version: v0.15.0
+
+Models:
+ - Name: mobilenet-v2_8xb32_in1k
+ Metadata:
+ FLOPs: 319000000
+ Parameters: 3500000
+ In Collection: MobileNet V2
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 71.86
+ Top 5 Accuracy: 90.42
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth
+ Config: configs/mobilenet_v2/mobilenet-v2_8xb32_in1k.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v2/mobilenet-v2_8xb32_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v2/mobilenet-v2_8xb32_in1k.py
new file mode 100644
index 00000000..88eaad52
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v2/mobilenet-v2_8xb32_in1k.py
@@ -0,0 +1,8 @@
+_base_ = [
+ '../_base_/models/mobilenet_v2_1x.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256_epochstep.py',
+ '../_base_/default_runtime.py'
+]
+
+# fp16 = dict(loss_scale=512.)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v2/mobilenet_v2_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v2/mobilenet_v2_b32x8_imagenet.py
new file mode 100644
index 00000000..26c2b6de
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v2/mobilenet_v2_b32x8_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'mobilenet-v2_8xb32_in1k.py'
+
+_deprecation_ = dict(
+ expected='mobilenet-v2_8xb32_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v3/README.md b/openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v3/README.md
new file mode 100644
index 00000000..737c4d32
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v3/README.md
@@ -0,0 +1,36 @@
+# MobileNet V3
+
+> [Searching for MobileNetV3](https://arxiv.org/abs/1905.02244)
+
+
+
+## Abstract
+
+We present the next generation of MobileNets based on a combination of complementary search techniques as well as a novel architecture design. MobileNetV3 is tuned to mobile phone CPUs through a combination of hardware-aware network architecture search (NAS) complemented by the NetAdapt algorithm and then subsequently improved through novel architecture advances. This paper starts the exploration of how automated search algorithms and network design can work together to harness complementary approaches improving the overall state of the art. Through this process we create two new MobileNet models for release: MobileNetV3-Large and MobileNetV3-Small which are targeted for high and low resource use cases. These models are then adapted and applied to the tasks of object detection and semantic segmentation. For the task of semantic segmentation (or any dense pixel prediction), we propose a new efficient segmentation decoder Lite Reduced Atrous Spatial Pyramid Pooling (LR-ASPP). We achieve new state of the art results for mobile classification, detection and segmentation. MobileNetV3-Large is 3.2% more accurate on ImageNet classification while reducing latency by 15% compared to MobileNetV2. MobileNetV3-Small is 4.6% more accurate while reducing latency by 5% compared to MobileNetV2. MobileNetV3-Large detection is 25% faster at roughly the same accuracy as MobileNetV2 on COCO detection. MobileNetV3-Large LR-ASPP is 30% faster than MobileNetV2 R-ASPP at similar accuracy for Cityscapes segmentation.
+
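+One of the paper's architecture advances (not named in the abstract above) is
+the quantization-friendly h-swish nonlinearity; a small PyTorch sketch:
+
+```python
+import torch
+import torch.nn.functional as F
+
+def h_swish(x):
+    # h-swish(x) = x * ReLU6(x + 3) / 6, a piecewise-linear
+    # approximation of the swish activation.
+    return x * F.relu6(x + 3) / 6
+
+print(h_swish(torch.tensor([-4.0, 0.0, 4.0])))  # tensor([-0., 0., 4.])
+```
+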
+
+
+
+
+
+## Results and models
+
+### ImageNet-1k
+
+| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+| :-----------------: | :-------: | :------: | :-------: | :-------: | :----------------------------------------------------------------------: | :------------------------------------------------------------------------: |
+| MobileNetV3-Small\* | 2.54 | 0.06 | 67.66 | 87.41 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/mobilenet_v3/mobilenet-v3-small_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/convert/mobilenet_v3_small-8427ecf0.pth) |
+| MobileNetV3-Large\* | 5.48 | 0.23 | 74.04 | 91.34 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/mobilenet_v3/mobilenet-v3-large_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/convert/mobilenet_v3_large-3ea3c186.pth) |
+
+*Models with \* are converted from [torchvision](https://pytorch.org/vision/stable/_modules/torchvision/models/mobilenetv3.html). The config files of these models are only for validation; we cannot guarantee their training accuracy, and you are welcome to contribute your reproduction results.*
+
+## Citation
+
+```bibtex
+@inproceedings{Howard_2019_ICCV,
+ author = {Howard, Andrew and Sandler, Mark and Chu, Grace and Chen, Liang-Chieh and Chen, Bo and Tan, Mingxing and Wang, Weijun and Zhu, Yukun and Pang, Ruoming and Vasudevan, Vijay and Le, Quoc V. and Adam, Hartwig},
+ title = {Searching for MobileNetV3},
+ booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)},
+ month = {October},
+ year = {2019}
+}
+```
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v3/metafile.yml b/openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v3/metafile.yml
new file mode 100644
index 00000000..09c4732e
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v3/metafile.yml
@@ -0,0 +1,47 @@
+Collections:
+ - Name: MobileNet V3
+ Metadata:
+ Training Data: ImageNet-1k
+ Training Techniques:
+ - RMSprop with Momentum
+ - Weight Decay
+ Training Resources: 8x V100 GPUs
+ Epochs: 600
+ Batch Size: 1024
+ Architecture:
+ - MobileNet V3
+ Paper:
+ URL: https://arxiv.org/abs/1905.02244
+ Title: Searching for MobileNetV3
+ README: configs/mobilenet_v3/README.md
+ Code:
+ URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/mobilenet_v3.py
+ Version: v0.15.0
+
+Models:
+ - Name: mobilenet_v3_small_imagenet
+ Metadata:
+ FLOPs: 60000000
+ Parameters: 2540000
+ In Collection: MobileNet V3
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 67.66
+ Top 5 Accuracy: 87.41
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/convert/mobilenet_v3_small-8427ecf0.pth
+ Config: configs/mobilenet_v3/mobilenet-v3-small_8xb32_in1k.py
+ - Name: mobilenet_v3_large_imagenet
+ Metadata:
+ FLOPs: 230000000
+ Parameters: 5480000
+ In Collection: MobileNet V3
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 74.04
+ Top 5 Accuracy: 91.34
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/convert/mobilenet_v3_large-3ea3c186.pth
+ Config: configs/mobilenet_v3/mobilenet-v3-large_8xb32_in1k.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v3/mobilenet-v3-large_8xb32_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v3/mobilenet-v3-large_8xb32_in1k.py
new file mode 100644
index 00000000..985ef520
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v3/mobilenet-v3-large_8xb32_in1k.py
@@ -0,0 +1,158 @@
+# Refer to https://pytorch.org/blog/ml-models-torchvision-v0.9/#classification
+# ----------------------------
+# -[x] auto_augment='imagenet'
+# -[x] batch_size=128 (per gpu)
+# -[x] epochs=600
+# -[x] opt='rmsprop'
+# -[x] lr=0.064
+# -[x] eps=0.0316
+# -[x] alpha=0.9
+# -[x] weight_decay=1e-05
+# -[x] momentum=0.9
+# -[x] lr_gamma=0.973
+# -[x] lr_step_size=2
+# -[x] nproc_per_node=8
+# -[x] random_erase=0.2
+# -[x] workers=16 (workers_per_gpu)
+# - modify: RandomErasing uses RE-M (fill with the per-channel mean) instead of RE-0 (fill with zeros)
+
+_base_ = [
+ '../_base_/models/mobilenet_v3_large_imagenet.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/default_runtime.py'
+]
+
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+policies = [
+ [
+ dict(type='Posterize', bits=4, prob=0.4),
+ dict(type='Rotate', angle=30., prob=0.6)
+ ],
+ [
+ dict(type='Solarize', thr=256 / 9 * 4, prob=0.6),
+ dict(type='AutoContrast', prob=0.6)
+ ],
+ [dict(type='Equalize', prob=0.8),
+ dict(type='Equalize', prob=0.6)],
+ [
+ dict(type='Posterize', bits=5, prob=0.6),
+ dict(type='Posterize', bits=5, prob=0.6)
+ ],
+ [
+ dict(type='Equalize', prob=0.4),
+ dict(type='Solarize', thr=256 / 9 * 5, prob=0.2)
+ ],
+ [
+ dict(type='Equalize', prob=0.4),
+ dict(type='Rotate', angle=30 / 9 * 8, prob=0.8)
+ ],
+ [
+ dict(type='Solarize', thr=256 / 9 * 6, prob=0.6),
+ dict(type='Equalize', prob=0.6)
+ ],
+ [dict(type='Posterize', bits=6, prob=0.8),
+ dict(type='Equalize', prob=1.)],
+ [
+ dict(type='Rotate', angle=10., prob=0.2),
+ dict(type='Solarize', thr=256 / 9, prob=0.6)
+ ],
+ [
+ dict(type='Equalize', prob=0.6),
+ dict(type='Posterize', bits=5, prob=0.4)
+ ],
+ [
+ dict(type='Rotate', angle=30 / 9 * 8, prob=0.8),
+ dict(type='ColorTransform', magnitude=0., prob=0.4)
+ ],
+ [
+ dict(type='Rotate', angle=30., prob=0.4),
+ dict(type='Equalize', prob=0.6)
+ ],
+ [dict(type='Equalize', prob=0.0),
+ dict(type='Equalize', prob=0.8)],
+ [dict(type='Invert', prob=0.6),
+ dict(type='Equalize', prob=1.)],
+ [
+ dict(type='ColorTransform', magnitude=0.4, prob=0.6),
+ dict(type='Contrast', magnitude=0.8, prob=1.)
+ ],
+ [
+ dict(type='Rotate', angle=30 / 9 * 8, prob=0.8),
+ dict(type='ColorTransform', magnitude=0.2, prob=1.)
+ ],
+ [
+ dict(type='ColorTransform', magnitude=0.8, prob=0.8),
+ dict(type='Solarize', thr=256 / 9 * 2, prob=0.8)
+ ],
+ [
+ dict(type='Sharpness', magnitude=0.7, prob=0.4),
+ dict(type='Invert', prob=0.6)
+ ],
+ [
+ dict(
+ type='Shear',
+ magnitude=0.3 / 9 * 5,
+ prob=0.6,
+ direction='horizontal'),
+ dict(type='Equalize', prob=1.)
+ ],
+ [
+ dict(type='ColorTransform', magnitude=0., prob=0.4),
+ dict(type='Equalize', prob=0.6)
+ ],
+ [
+ dict(type='Equalize', prob=0.4),
+ dict(type='Solarize', thr=256 / 9 * 5, prob=0.2)
+ ],
+ [
+ dict(type='Solarize', thr=256 / 9 * 4, prob=0.6),
+ dict(type='AutoContrast', prob=0.6)
+ ],
+ [dict(type='Invert', prob=0.6),
+ dict(type='Equalize', prob=1.)],
+ [
+ dict(type='ColorTransform', magnitude=0.4, prob=0.6),
+ dict(type='Contrast', magnitude=0.8, prob=1.)
+ ],
+ [dict(type='Equalize', prob=0.8),
+ dict(type='Equalize', prob=0.6)],
+]
+
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='RandomResizedCrop', size=224, backend='pillow'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='AutoAugment', policies=policies),
+ dict(
+ type='RandomErasing',
+ erase_prob=0.2,
+ mode='const',
+ min_area_ratio=0.02,
+ max_area_ratio=1 / 3,
+ fill_color=img_norm_cfg['mean']),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+
+data = dict(
+ samples_per_gpu=128,
+ workers_per_gpu=4,
+ train=dict(pipeline=train_pipeline))
+evaluation = dict(interval=10, metric='accuracy')
+
+# optimizer
+optimizer = dict(
+ type='RMSprop',
+ lr=0.064,
+ alpha=0.9,
+ momentum=0.9,
+ eps=0.0316,
+ weight_decay=1e-5)
+optimizer_config = dict(grad_clip=None)
+# learning policy
+lr_config = dict(policy='step', step=2, gamma=0.973, by_epoch=True)
+runner = dict(type='EpochBasedRunner', max_epochs=600)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v3/mobilenet-v3-small_8xb16_cifar10.py b/openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v3/mobilenet-v3-small_8xb16_cifar10.py
new file mode 100644
index 00000000..06e63dab
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v3/mobilenet-v3-small_8xb16_cifar10.py
@@ -0,0 +1,8 @@
+_base_ = [
+ '../_base_/models/mobilenet-v3-small_cifar.py',
+ '../_base_/datasets/cifar10_bs16.py',
+ '../_base_/schedules/cifar10_bs128.py', '../_base_/default_runtime.py'
+]
+
+lr_config = dict(policy='step', step=[120, 170])
+runner = dict(type='EpochBasedRunner', max_epochs=200)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v3/mobilenet-v3-small_8xb32_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v3/mobilenet-v3-small_8xb32_in1k.py
new file mode 100644
index 00000000..2612166f
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v3/mobilenet-v3-small_8xb32_in1k.py
@@ -0,0 +1,158 @@
+# Refer to https://pytorch.org/blog/ml-models-torchvision-v0.9/#classification
+# ----------------------------
+# -[x] auto_augment='imagenet'
+# -[x] batch_size=128 (per gpu)
+# -[x] epochs=600
+# -[x] opt='rmsprop'
+# -[x] lr=0.064
+# -[x] eps=0.0316
+# -[x] alpha=0.9
+# -[x] weight_decay=1e-05
+# -[x] momentum=0.9
+# -[x] lr_gamma=0.973
+# -[x] lr_step_size=2
+# -[x] nproc_per_node=8
+# -[x] random_erase=0.2
+# -[x] workers=16 (workers_per_gpu)
+# - modify: RandomErasing uses RE-M (fill with the per-channel mean) instead of RE-0 (fill with zeros)
+
+_base_ = [
+ '../_base_/models/mobilenet_v3_small_imagenet.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/default_runtime.py'
+]
+
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+policies = [
+ [
+ dict(type='Posterize', bits=4, prob=0.4),
+ dict(type='Rotate', angle=30., prob=0.6)
+ ],
+ [
+ dict(type='Solarize', thr=256 / 9 * 4, prob=0.6),
+ dict(type='AutoContrast', prob=0.6)
+ ],
+ [dict(type='Equalize', prob=0.8),
+ dict(type='Equalize', prob=0.6)],
+ [
+ dict(type='Posterize', bits=5, prob=0.6),
+ dict(type='Posterize', bits=5, prob=0.6)
+ ],
+ [
+ dict(type='Equalize', prob=0.4),
+ dict(type='Solarize', thr=256 / 9 * 5, prob=0.2)
+ ],
+ [
+ dict(type='Equalize', prob=0.4),
+ dict(type='Rotate', angle=30 / 9 * 8, prob=0.8)
+ ],
+ [
+ dict(type='Solarize', thr=256 / 9 * 6, prob=0.6),
+ dict(type='Equalize', prob=0.6)
+ ],
+ [dict(type='Posterize', bits=6, prob=0.8),
+ dict(type='Equalize', prob=1.)],
+ [
+ dict(type='Rotate', angle=10., prob=0.2),
+ dict(type='Solarize', thr=256 / 9, prob=0.6)
+ ],
+ [
+ dict(type='Equalize', prob=0.6),
+ dict(type='Posterize', bits=5, prob=0.4)
+ ],
+ [
+ dict(type='Rotate', angle=30 / 9 * 8, prob=0.8),
+ dict(type='ColorTransform', magnitude=0., prob=0.4)
+ ],
+ [
+ dict(type='Rotate', angle=30., prob=0.4),
+ dict(type='Equalize', prob=0.6)
+ ],
+ [dict(type='Equalize', prob=0.0),
+ dict(type='Equalize', prob=0.8)],
+ [dict(type='Invert', prob=0.6),
+ dict(type='Equalize', prob=1.)],
+ [
+ dict(type='ColorTransform', magnitude=0.4, prob=0.6),
+ dict(type='Contrast', magnitude=0.8, prob=1.)
+ ],
+ [
+ dict(type='Rotate', angle=30 / 9 * 8, prob=0.8),
+ dict(type='ColorTransform', magnitude=0.2, prob=1.)
+ ],
+ [
+ dict(type='ColorTransform', magnitude=0.8, prob=0.8),
+ dict(type='Solarize', thr=256 / 9 * 2, prob=0.8)
+ ],
+ [
+ dict(type='Sharpness', magnitude=0.7, prob=0.4),
+ dict(type='Invert', prob=0.6)
+ ],
+ [
+ dict(
+ type='Shear',
+ magnitude=0.3 / 9 * 5,
+ prob=0.6,
+ direction='horizontal'),
+ dict(type='Equalize', prob=1.)
+ ],
+ [
+ dict(type='ColorTransform', magnitude=0., prob=0.4),
+ dict(type='Equalize', prob=0.6)
+ ],
+ [
+ dict(type='Equalize', prob=0.4),
+ dict(type='Solarize', thr=256 / 9 * 5, prob=0.2)
+ ],
+ [
+ dict(type='Solarize', thr=256 / 9 * 4, prob=0.6),
+ dict(type='AutoContrast', prob=0.6)
+ ],
+ [dict(type='Invert', prob=0.6),
+ dict(type='Equalize', prob=1.)],
+ [
+ dict(type='ColorTransform', magnitude=0.4, prob=0.6),
+ dict(type='Contrast', magnitude=0.8, prob=1.)
+ ],
+ [dict(type='Equalize', prob=0.8),
+ dict(type='Equalize', prob=0.6)],
+]
+
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='RandomResizedCrop', size=224, backend='pillow'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='AutoAugment', policies=policies),
+ dict(
+ type='RandomErasing',
+ erase_prob=0.2,
+ mode='const',
+ min_area_ratio=0.02,
+ max_area_ratio=1 / 3,
+ fill_color=img_norm_cfg['mean']),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+
+data = dict(
+ samples_per_gpu=128,
+ workers_per_gpu=4,
+ train=dict(pipeline=train_pipeline))
+evaluation = dict(interval=10, metric='accuracy')
+
+# optimizer
+optimizer = dict(
+ type='RMSprop',
+ lr=0.064,
+ alpha=0.9,
+ momentum=0.9,
+ eps=0.0316,
+ weight_decay=1e-5)
+optimizer_config = dict(grad_clip=None)
+# learning policy
+lr_config = dict(policy='step', step=2, gamma=0.973, by_epoch=True)
+runner = dict(type='EpochBasedRunner', max_epochs=600)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v3/mobilenet_v3_large_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v3/mobilenet_v3_large_imagenet.py
new file mode 100644
index 00000000..93e89a49
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v3/mobilenet_v3_large_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'mobilenet-v3-large_8xb32_in1k.py'
+
+_deprecation_ = dict(
+ expected='mobilenet-v3-large_8xb32_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v3/mobilenet_v3_small_cifar.py b/openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v3/mobilenet_v3_small_cifar.py
new file mode 100644
index 00000000..c09bd1cd
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v3/mobilenet_v3_small_cifar.py
@@ -0,0 +1,6 @@
+_base_ = 'mobilenet-v3-small_8xb16_cifar10.py'
+
+_deprecation_ = dict(
+ expected='mobilenet-v3-small_8xb16_cifar10.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v3/mobilenet_v3_small_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v3/mobilenet_v3_small_imagenet.py
new file mode 100644
index 00000000..15debd0f
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/mobilenet_v3/mobilenet_v3_small_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'mobilenet-v3-small_8xb32_in1k.py'
+
+_deprecation_ = dict(
+ expected='mobilenet-v3-small_8xb32_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/mvit/README.md b/openmmlab_test/mmclassification-0.24.1/configs/mvit/README.md
new file mode 100644
index 00000000..6f5c5608
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/mvit/README.md
@@ -0,0 +1,44 @@
+# MViT V2
+
+> [MViTv2: Improved Multiscale Vision Transformers for Classification and Detection](http://openaccess.thecvf.com//content/CVPR2022/papers/Li_MViTv2_Improved_Multiscale_Vision_Transformers_for_Classification_and_Detection_CVPR_2022_paper.pdf)
+
+
+
+## Abstract
+
+In this paper, we study Multiscale Vision Transformers (MViTv2) as a unified architecture for image and video
+classification, as well as object detection. We present an improved version of MViT that incorporates
+decomposed relative positional embeddings and residual pooling connections. We instantiate this architecture
+in five sizes and evaluate it for ImageNet classification, COCO detection and Kinetics video recognition where
+it outperforms prior work. We further compare MViTv2s' pooling attention to window attention mechanisms where
+it outperforms the latter in accuracy/compute. Without bells-and-whistles, MViTv2 has state-of-the-art
+performance in 3 domains: 88.8% accuracy on ImageNet classification, 58.7 boxAP on COCO object detection as
+well as 86.1% on Kinetics-400 video classification.
+
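+The pooling attention compared above can be sketched as ordinary self-attention
+whose keys and values are spatially downsampled before the attention product (a
+toy illustration, not the MViTv2 module; a strided depthwise convolution stands
+in for the pooling operator).
+
+```python
+import torch
+import torch.nn as nn
+
+class PooledSelfAttention(nn.Module):
+    def __init__(self, dim, stride=2):
+        super().__init__()
+        self.qkv = nn.Linear(dim, dim * 3)
+        # strided depthwise conv acts as the pooling operator
+        self.pool = nn.Conv2d(dim, dim, 3, stride=stride, padding=1, groups=dim)
+        self.scale = dim ** -0.5
+
+    def forward(self, x, h, w):  # x: (B, N, C) with N == h * w
+        b, n, c = x.shape
+        q, k, v = self.qkv(x).chunk(3, dim=-1)
+
+        def downsample(t):  # (B, N, C) -> (B, N', C) with N' < N
+            t = t.transpose(1, 2).reshape(b, c, h, w)
+            return self.pool(t).flatten(2).transpose(1, 2)
+
+        k, v = downsample(k), downsample(v)
+        attn = (q @ k.transpose(-2, -1)) * self.scale  # (B, N, N'), smaller
+        return attn.softmax(dim=-1) @ v                # (B, N, C)
+
+x = torch.randn(2, 14 * 14, 96)
+print(PooledSelfAttention(96)(x, 14, 14).shape)  # torch.Size([2, 196, 96])
+```
+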
+
+
+
+
+
+## Results and models
+
+### ImageNet-1k
+
+| Model | Pretrain | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+| :------------: | :----------: | :-------: | :------: | :-------: | :-------: | :------------------------------------------------------------------: | :---------------------------------------------------------------------: |
+| MViTv2-tiny\* | From scratch | 24.17 | 4.70 | 82.33 | 96.15 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/mvit/mvitv2-tiny_8xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mvit/mvitv2-tiny_3rdparty_in1k_20220722-db7beeef.pth) |
+| MViTv2-small\* | From scratch | 34.87 | 7.00 | 83.63 | 96.51 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/mvit/mvitv2-small_8xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mvit/mvitv2-small_3rdparty_in1k_20220722-986bd741.pth) |
+| MViTv2-base\* | From scratch | 51.47 | 10.20 | 84.34 | 96.86 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/mvit/mvitv2-base_8xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mvit/mvitv2-base_3rdparty_in1k_20220722-9c4f0a17.pth) |
+| MViTv2-large\* | From scratch | 217.99 | 42.10 | 85.25 | 97.14 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/mvit/mvitv2-large_8xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mvit/mvitv2-large_3rdparty_in1k_20220722-2b57b983.pth) |
+
+*Models with \* are converted from the [official repo](https://github.com/facebookresearch/mvit). The config files of these models are only for inference; we cannot guarantee their training accuracy, and you are welcome to contribute your reproduction results.*
+
+## Citation
+
+```bibtex
+@inproceedings{li2021improved,
+ title={MViTv2: Improved multiscale vision transformers for classification and detection},
+ author={Li, Yanghao and Wu, Chao-Yuan and Fan, Haoqi and Mangalam, Karttikeya and Xiong, Bo and Malik, Jitendra and Feichtenhofer, Christoph},
+ booktitle={CVPR},
+ year={2022}
+}
+```
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/mvit/metafile.yml b/openmmlab_test/mmclassification-0.24.1/configs/mvit/metafile.yml
new file mode 100644
index 00000000..8d46a0c8
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/mvit/metafile.yml
@@ -0,0 +1,95 @@
+Collections:
+ - Name: MViT V2
+ Metadata:
+ Architecture:
+ - Attention Dropout
+ - Convolution
+ - Dense Connections
+ - GELU
+ - Layer Normalization
+ - Scaled Dot-Product Attention
+ - Attention Pooling
+ Paper:
+ URL: http://openaccess.thecvf.com//content/CVPR2022/papers/Li_MViTv2_Improved_Multiscale_Vision_Transformers_for_Classification_and_Detection_CVPR_2022_paper.pdf
+ Title: 'MViTv2: Improved Multiscale Vision Transformers for Classification and Detection'
+ README: configs/mvit/README.md
+ Code:
+ URL: https://github.com/open-mmlab/mmclassification/blob/v0.24.0/mmcls/models/backbones/mvit.py
+ Version: v0.24.0
+
+Models:
+ - Name: mvitv2-tiny_3rdparty_in1k
+ In Collection: MViT V2
+ Metadata:
+ FLOPs: 4700000000
+ Parameters: 24173320
+ Training Data:
+ - ImageNet-1k
+ Results:
+ - Dataset: ImageNet-1k
+ Task: Image Classification
+ Metrics:
+ Top 1 Accuracy: 82.33
+ Top 5 Accuracy: 96.15
+ Weights: https://download.openmmlab.com/mmclassification/v0/mvit/mvitv2-tiny_3rdparty_in1k_20220722-db7beeef.pth
+ Converted From:
+ Weights: https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_T_in1k.pyth
+ Code: https://github.com/facebookresearch/mvit
+ Config: configs/mvit/mvitv2-tiny_8xb256_in1k.py
+
+ - Name: mvitv2-small_3rdparty_in1k
+ In Collection: MViT V2
+ Metadata:
+ FLOPs: 7000000000
+ Parameters: 34870216
+ Training Data:
+ - ImageNet-1k
+ Results:
+ - Dataset: ImageNet-1k
+ Task: Image Classification
+ Metrics:
+ Top 1 Accuracy: 83.63
+ Top 5 Accuracy: 96.51
+ Weights: https://download.openmmlab.com/mmclassification/v0/mvit/mvitv2-small_3rdparty_in1k_20220722-986bd741.pth
+ Converted From:
+ Weights: https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_S_in1k.pyth
+ Code: https://github.com/facebookresearch/mvit
+ Config: configs/mvit/mvitv2-small_8xb256_in1k.py
+
+ - Name: mvitv2-base_3rdparty_in1k
+ In Collection: MViT V2
+ Metadata:
+ FLOPs: 10200000000
+ Parameters: 51472744
+ Training Data:
+ - ImageNet-1k
+ Results:
+ - Dataset: ImageNet-1k
+ Task: Image Classification
+ Metrics:
+ Top 1 Accuracy: 84.34
+ Top 5 Accuracy: 96.86
+ Weights: https://download.openmmlab.com/mmclassification/v0/mvit/mvitv2-base_3rdparty_in1k_20220722-9c4f0a17.pth
+ Converted From:
+ Weights: https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_B_in1k.pyth
+ Code: https://github.com/facebookresearch/mvit
+ Config: configs/mvit/mvitv2-base_8xb256_in1k.py
+
+ - Name: mvitv2-large_3rdparty_in1k
+ In Collection: MViT V2
+ Metadata:
+ FLOPs: 42100000000
+ Parameters: 217992952
+ Training Data:
+ - ImageNet-1k
+ Results:
+ - Dataset: ImageNet-1k
+ Task: Image Classification
+ Metrics:
+ Top 1 Accuracy: 85.25
+ Top 5 Accuracy: 97.14
+ Weights: https://download.openmmlab.com/mmclassification/v0/mvit/mvitv2-large_3rdparty_in1k_20220722-2b57b983.pth
+ Converted From:
+ Weights: https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_L_in1k.pyth
+ Code: https://github.com/facebookresearch/mvit
+ Config: configs/mvit/mvitv2-large_8xb256_in1k.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/mvit/mvitv2-base_8xb256_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/mvit/mvitv2-base_8xb256_in1k.py
new file mode 100644
index 00000000..ea92cf40
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/mvit/mvitv2-base_8xb256_in1k.py
@@ -0,0 +1,29 @@
+_base_ = [
+ '../_base_/models/mvit/mvitv2-base.py',
+ '../_base_/datasets/imagenet_bs64_swin_224.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py'
+]
+
+# dataset settings
+data = dict(samples_per_gpu=256)
+
+# schedule settings
+paramwise_cfg = dict(
+ norm_decay_mult=0.0,
+ bias_decay_mult=0.0,
+ custom_keys={
+ '.pos_embed': dict(decay_mult=0.0),
+ '.rel_pos_h': dict(decay_mult=0.0),
+ '.rel_pos_w': dict(decay_mult=0.0)
+ })
+
+optimizer = dict(lr=0.00025, paramwise_cfg=paramwise_cfg)
+optimizer_config = dict(grad_clip=dict(max_norm=1.0))
+
+# learning policy
+lr_config = dict(
+ policy='CosineAnnealing',
+ warmup='linear',
+ warmup_iters=70,
+ warmup_by_epoch=True)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/mvit/mvitv2-large_8xb256_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/mvit/mvitv2-large_8xb256_in1k.py
new file mode 100644
index 00000000..fbb81d69
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/mvit/mvitv2-large_8xb256_in1k.py
@@ -0,0 +1,29 @@
+_base_ = [
+ '../_base_/models/mvit/mvitv2-large.py',
+ '../_base_/datasets/imagenet_bs64_swin_224.py',
+ '../_base_/schedules/imagenet_bs2048_AdamW.py',
+ '../_base_/default_runtime.py'
+]
+
+# dataset settings
+data = dict(samples_per_gpu=256)
+
+# schedule settings
+paramwise_cfg = dict(
+ norm_decay_mult=0.0,
+ bias_decay_mult=0.0,
+ custom_keys={
+ '.pos_embed': dict(decay_mult=0.0),
+ '.rel_pos_h': dict(decay_mult=0.0),
+ '.rel_pos_w': dict(decay_mult=0.0)
+ })
+
+optimizer = dict(lr=0.00025, paramwise_cfg=paramwise_cfg)
+optimizer_config = dict(grad_clip=dict(max_norm=1.0))
+
+# learning policy
+lr_config = dict(
+ policy='CosineAnnealing',
+ warmup='linear',
+ warmup_iters=70,
+ warmup_by_epoch=True)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/mvit/mvitv2-small_8xb256_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/mvit/mvitv2-small_8xb256_in1k.py
new file mode 100644
index 00000000..18038593
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/mvit/mvitv2-small_8xb256_in1k.py
@@ -0,0 +1,29 @@
+_base_ = [
+ '../_base_/models/mvit/mvitv2-small.py',
+ '../_base_/datasets/imagenet_bs64_swin_224.py',
+ '../_base_/schedules/imagenet_bs2048_AdamW.py',
+ '../_base_/default_runtime.py'
+]
+
+# dataset settings
+data = dict(samples_per_gpu=256)
+
+# schedule settings
+paramwise_cfg = dict(
+ norm_decay_mult=0.0,
+ bias_decay_mult=0.0,
+ custom_keys={
+ '.pos_embed': dict(decay_mult=0.0),
+ '.rel_pos_h': dict(decay_mult=0.0),
+ '.rel_pos_w': dict(decay_mult=0.0)
+ })
+
+optimizer = dict(lr=0.00025, paramwise_cfg=paramwise_cfg)
+optimizer_config = dict(grad_clip=dict(max_norm=1.0))
+
+# learning policy
+lr_config = dict(
+ policy='CosineAnnealing',
+ warmup='linear',
+ warmup_iters=70,
+ warmup_by_epoch=True)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/mvit/mvitv2-tiny_8xb256_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/mvit/mvitv2-tiny_8xb256_in1k.py
new file mode 100644
index 00000000..f4b9bc48
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/mvit/mvitv2-tiny_8xb256_in1k.py
@@ -0,0 +1,29 @@
+_base_ = [
+ '../_base_/models/mvit/mvitv2-tiny.py',
+ '../_base_/datasets/imagenet_bs64_swin_224.py',
+ '../_base_/schedules/imagenet_bs2048_AdamW.py',
+ '../_base_/default_runtime.py'
+]
+
+# dataset settings
+data = dict(samples_per_gpu=256)
+
+# schedule settings
+paramwise_cfg = dict(
+ norm_decay_mult=0.0,
+ bias_decay_mult=0.0,
+ custom_keys={
+ '.pos_embed': dict(decay_mult=0.0),
+ '.rel_pos_h': dict(decay_mult=0.0),
+ '.rel_pos_w': dict(decay_mult=0.0)
+ })
+
+optimizer = dict(lr=0.00025, paramwise_cfg=paramwise_cfg)
+optimizer_config = dict(grad_clip=dict(max_norm=1.0))
+
+# learning policy
+lr_config = dict(
+ policy='CosineAnnealing',
+ warmup='linear',
+ warmup_iters=70,
+ warmup_by_epoch=True)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/poolformer/README.md b/openmmlab_test/mmclassification-0.24.1/configs/poolformer/README.md
new file mode 100644
index 00000000..cc557e10
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/poolformer/README.md
@@ -0,0 +1,38 @@
+# PoolFormer
+
+> [MetaFormer is Actually What You Need for Vision](https://arxiv.org/abs/2111.11418)
+
+
+
+## Abstract
+
+Transformers have shown great potential in computer vision tasks. A common belief is their attention-based token mixer module contributes most to their competence. However, recent works show the attention-based module in transformers can be replaced by spatial MLPs and the resulted models still perform quite well. Based on this observation, we hypothesize that the general architecture of the transformers, instead of the specific token mixer module, is more essential to the model's performance. To verify this, we deliberately replace the attention module in transformers with an embarrassingly simple spatial pooling operator to conduct only basic token mixing. Surprisingly, we observe that the derived model, termed as PoolFormer, achieves competitive performance on multiple computer vision tasks. For example, on ImageNet-1K, PoolFormer achieves 82.1% top-1 accuracy, surpassing well-tuned vision transformer/MLP-like baselines DeiT-B/ResMLP-B24 by 0.3%/1.1% accuracy with 35%/52% fewer parameters and 49%/61% fewer MACs. The effectiveness of PoolFormer verifies our hypothesis and urges us to initiate the concept of "MetaFormer", a general architecture abstracted from transformers without specifying the token mixer. Based on the extensive experiments, we argue that MetaFormer is the key player in achieving superior results for recent transformer and MLP-like models on vision tasks. This work calls for more future research dedicated to improving MetaFormer instead of focusing on the token mixer modules. Additionally, our proposed PoolFormer could serve as a starting baseline for future MetaFormer architecture design.
+
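+The "embarrassingly simple" token mixer is easy to write down. A sketch of the
+pooling mixer (the subtraction keeps only the neighborhood mixing, since the
+block's residual connection re-adds the input):
+
+```python
+import torch
+import torch.nn as nn
+
+class PoolingTokenMixer(nn.Module):
+    def __init__(self, pool_size=3):
+        super().__init__()
+        self.pool = nn.AvgPool2d(
+            pool_size, stride=1, padding=pool_size // 2,
+            count_include_pad=False)
+
+    def forward(self, x):        # x: (B, C, H, W)
+        return self.pool(x) - x  # pure spatial mixing, no parameters
+
+x = torch.randn(1, 64, 56, 56)
+print(PoolingTokenMixer()(x).shape)  # torch.Size([1, 64, 56, 56])
+```
+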
+
+
+
+
+
+## Results and models
+
+### ImageNet-1k
+
+| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+| :--------------: | :-------: | :------: | :-------: | :-------: | :-----------------------------------------------------------------------: | :--------------------------------------------------------------------------: |
+| PoolFormer-S12\* | 11.92 | 1.87 | 77.24 | 93.51 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/poolformer/poolformer-s12_32xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-s12_3rdparty_32xb128_in1k_20220414-f8d83051.pth) |
+| PoolFormer-S24\* | 21.39 | 3.51 | 80.33 | 95.05 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/poolformer/poolformer-s24_32xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-s24_3rdparty_32xb128_in1k_20220414-d7055904.pth) |
+| PoolFormer-S36\* | 30.86 | 5.15 | 81.43 | 95.45 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/poolformer/poolformer-s36_32xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-s36_3rdparty_32xb128_in1k_20220414-d78ff3e8.pth) |
+| PoolFormer-M36\* | 56.17 | 8.96 | 82.14 | 95.71 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/poolformer/poolformer-m36_32xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-m36_3rdparty_32xb128_in1k_20220414-c55e0949.pth) |
+| PoolFormer-M48\* | 73.47 | 11.80 | 82.51 | 95.95 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/poolformer/poolformer-m48_32xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-m48_3rdparty_32xb128_in1k_20220414-9378f3eb.pth) |
+
+*Models with \* are converted from the [official repo](https://github.com/sail-sg/poolformer). The config files of these models are only for inference; we cannot guarantee their training accuracy, and you are welcome to contribute your reproduction results.*
+
+## Citation
+
+```bibtex
+@article{yu2021metaformer,
+ title={MetaFormer is Actually What You Need for Vision},
+ author={Yu, Weihao and Luo, Mi and Zhou, Pan and Si, Chenyang and Zhou, Yichen and Wang, Xinchao and Feng, Jiashi and Yan, Shuicheng},
+ journal={arXiv preprint arXiv:2111.11418},
+ year={2021}
+}
+```
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/poolformer/metafile.yml b/openmmlab_test/mmclassification-0.24.1/configs/poolformer/metafile.yml
new file mode 100644
index 00000000..d94219d1
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/poolformer/metafile.yml
@@ -0,0 +1,99 @@
+Collections:
+ - Name: PoolFormer
+ Metadata:
+ Training Data: ImageNet-1k
+ Architecture:
+ - Pooling
+ - 1x1 Convolution
+ - LayerScale
+ Paper:
+ URL: https://arxiv.org/abs/2111.11418
+ Title: MetaFormer is Actually What You Need for Vision
+ README: configs/poolformer/README.md
+ Code:
+ Version: v0.22.1
+ URL: https://github.com/open-mmlab/mmclassification/blob/v0.22.1/mmcls/models/backbones/poolformer.py
+
+Models:
+ - Name: poolformer-s12_3rdparty_32xb128_in1k
+ Metadata:
+ FLOPs: 1871399424
+ Parameters: 11915176
+ In Collection: PoolFormer
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 77.24
+ Top 5 Accuracy: 93.51
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-s12_3rdparty_32xb128_in1k_20220414-f8d83051.pth
+ Config: configs/poolformer/poolformer-s12_32xb128_in1k.py
+ Converted From:
+ Weights: https://github.com/sail-sg/poolformer/releases/download/v1.0/poolformer_s12.pth.tar
+ Code: https://github.com/sail-sg/poolformer
+ - Name: poolformer-s24_3rdparty_32xb128_in1k
+ Metadata:
+ Training Data: ImageNet-1k
+ FLOPs: 3510411008
+ Parameters: 21388968
+ In Collection: PoolFormer
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 80.33
+ Top 5 Accuracy: 95.05
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-s24_3rdparty_32xb128_in1k_20220414-d7055904.pth
+ Config: configs/poolformer/poolformer-s24_32xb128_in1k.py
+ Converted From:
+ Weights: https://github.com/sail-sg/poolformer/releases/download/v1.0/poolformer_s24.pth.tar
+ Code: https://github.com/sail-sg/poolformer
+ - Name: poolformer-s36_3rdparty_32xb128_in1k
+ Metadata:
+ FLOPs: 5149422592
+ Parameters: 30862760
+ In Collection: PoolFormer
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 81.43
+ Top 5 Accuracy: 95.45
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-s36_3rdparty_32xb128_in1k_20220414-d78ff3e8.pth
+ Config: configs/poolformer/poolformer-s36_32xb128_in1k.py
+ Converted From:
+ Weights: https://github.com/sail-sg/poolformer/releases/download/v1.0/poolformer_s36.pth.tar
+ Code: https://github.com/sail-sg/poolformer
+ - Name: poolformer-m36_3rdparty_32xb128_in1k
+ Metadata:
+ Training Data: ImageNet-1k
+ FLOPs: 8960175744
+ Parameters: 56172520
+ In Collection: PoolFormer
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 82.14
+ Top 5 Accuracy: 95.71
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-m36_3rdparty_32xb128_in1k_20220414-c55e0949.pth
+ Config: configs/poolformer/poolformer-m36_32xb128_in1k.py
+ Converted From:
+ Weights: https://github.com/sail-sg/poolformer/releases/download/v1.0/poolformer_m36.pth.tar
+ Code: https://github.com/sail-sg/poolformer
+ - Name: poolformer-m48_3rdparty_32xb128_in1k
+ Metadata:
+ FLOPs: 11801805696
+ Parameters: 73473448
+ In Collection: PoolFormer
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 82.51
+ Top 5 Accuracy: 95.95
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-m48_3rdparty_32xb128_in1k_20220414-9378f3eb.pth
+ Config: configs/poolformer/poolformer-m48_32xb128_in1k.py
+ Converted From:
+ Weights: https://github.com/sail-sg/poolformer/releases/download/v1.0/poolformer_m48.pth.tar
+ Code: https://github.com/sail-sg/poolformer
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/poolformer/poolformer-m36_32xb128_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/poolformer/poolformer-m36_32xb128_in1k.py
new file mode 100644
index 00000000..1937a786
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/poolformer/poolformer-m36_32xb128_in1k.py
@@ -0,0 +1,8 @@
+_base_ = [
+ '../_base_/models/poolformer/poolformer_m36.py',
+ '../_base_/datasets/imagenet_bs128_poolformer_medium_224.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py',
+]
+
+optimizer = dict(lr=4e-3)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/poolformer/poolformer-m48_32xb128_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/poolformer/poolformer-m48_32xb128_in1k.py
new file mode 100644
index 00000000..a65b76a6
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/poolformer/poolformer-m48_32xb128_in1k.py
@@ -0,0 +1,8 @@
+_base_ = [
+ '../_base_/models/poolformer/poolformer_m48.py',
+ '../_base_/datasets/imagenet_bs128_poolformer_medium_224.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py',
+]
+
+optimizer = dict(lr=4e-3)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/poolformer/poolformer-s12_32xb128_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/poolformer/poolformer-s12_32xb128_in1k.py
new file mode 100644
index 00000000..98027c07
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/poolformer/poolformer-s12_32xb128_in1k.py
@@ -0,0 +1,8 @@
+_base_ = [
+ '../_base_/models/poolformer/poolformer_s12.py',
+ '../_base_/datasets/imagenet_bs128_poolformer_small_224.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py',
+]
+
+optimizer = dict(lr=4e-3)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/poolformer/poolformer-s24_32xb128_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/poolformer/poolformer-s24_32xb128_in1k.py
new file mode 100644
index 00000000..97742594
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/poolformer/poolformer-s24_32xb128_in1k.py
@@ -0,0 +1,8 @@
+_base_ = [
+ '../_base_/models/poolformer/poolformer_s24.py',
+ '../_base_/datasets/imagenet_bs128_poolformer_small_224.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py',
+]
+
+optimizer = dict(lr=4e-3)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/poolformer/poolformer-s36_32xb128_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/poolformer/poolformer-s36_32xb128_in1k.py
new file mode 100644
index 00000000..4d742d37
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/poolformer/poolformer-s36_32xb128_in1k.py
@@ -0,0 +1,8 @@
+_base_ = [
+ '../_base_/models/poolformer/poolformer_s36.py',
+ '../_base_/datasets/imagenet_bs128_poolformer_small_224.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py',
+]
+
+optimizer = dict(lr=4e-3)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/regnet/README.md b/openmmlab_test/mmclassification-0.24.1/configs/regnet/README.md
new file mode 100644
index 00000000..1ae074d6
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/regnet/README.md
@@ -0,0 +1,51 @@
+# RegNet
+
+> [Designing Network Design Spaces](https://arxiv.org/abs/2003.13678)
+
+
+
+## Abstract
+
+In this work, we present a new network design paradigm. Our goal is to help advance the understanding of network design and discover design principles that generalize across settings. Instead of focusing on designing individual network instances, we design network design spaces that parametrize populations of networks. The overall process is analogous to classic manual design of networks, but elevated to the design space level. Using our methodology we explore the structure aspect of network design and arrive at a low-dimensional design space consisting of simple, regular networks that we call RegNet. The core insight of the RegNet parametrization is surprisingly simple: widths and depths of good networks can be explained by a quantized linear function. We analyze the RegNet design space and arrive at interesting findings that do not match the current practice of network design. The RegNet design space provides simple and fast networks that work well across a wide range of flop regimes. Under comparable training settings and flops, the RegNet models outperform the popular EfficientNet models while being up to 5x faster on GPUs.
+
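+The quantized linear width rule can be made concrete with a short sketch
+(parameters roughly those of RegNetX-400MF): the continuous widths
+u_j = w_0 + w_a * j are snapped to w_0 times a power of w_m, then rounded to a
+multiple of 8.
+
+```python
+import numpy as np
+
+def regnet_widths(w_0=24, w_a=24.48, w_m=2.54, depth=22, q=8):
+    u = w_0 + w_a * np.arange(depth)             # continuous linear widths
+    s = np.round(np.log(u / w_0) / np.log(w_m))  # quantization exponents
+    w = w_0 * np.power(w_m, s)                   # quantized widths
+    return (np.round(w / q) * q).astype(int)     # round to multiples of q
+
+print(regnet_widths())  # per-block widths; equal widths form a stage
+```
+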
+
+
+
+
+
+## Results and models
+
+### ImageNet-1k
+
+| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+| :-------------: | :-------: | :------: | :-------: | :-------: | :------------------------------------------------------------------------: | :--------------------------------------------------------------------------: |
+| RegNetX-400MF | 5.16 | 0.41 | 72.56 | 90.78 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/regnet/regnetx-400mf_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-400mf_8xb128_in1k_20211213-89bfc226.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-400mf_8xb128_in1k_20211208_143316.log.json) |
+| RegNetX-800MF | 7.26 | 0.81 | 74.76 | 92.32 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/regnet/regnetx-800mf_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-800mf_8xb128_in1k_20211213-222b0f11.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-800mf_8xb128_in1k_20211207_143037.log.json) |
+| RegNetX-1.6GF | 9.19 | 1.63 | 76.84 | 93.31 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/regnet/regnetx-1.6gf_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-1.6gf_8xb128_in1k_20211213-d1b89758.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-1.6gf_8xb128_in1k_20211208_143018.log.json) |
+| RegNetX-3.2GF | 15.3 | 3.21 | 78.09 | 94.08 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/regnet/regnetx-3.2gf_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-3.2gf_8xb64_in1k_20211213-1fdd82ae.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-3.2gf_8xb64_in1k_20211208_142720.log.json) |
+| RegNetX-4.0GF | 22.12 | 4.0 | 78.60 | 94.17 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/regnet/regnetx-4.0gf_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-4.0gf_8xb64_in1k_20211213-efed675c.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-4.0gf_8xb64_in1k_20211207_150431.log.json) |
+| RegNetX-6.4GF | 26.21 | 6.51 | 79.38 | 94.65 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/regnet/regnetx-6.4gf_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-6.4gf_8xb64_in1k_20211215-5c6089da.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-6.4gf_8xb64_in1k_20211213_172748.log.json) |
+| RegNetX-8.0GF | 39.57 | 8.03 | 79.12 | 94.51 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/regnet/regnetx-8.0gf_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-8.0gf_8xb64_in1k_20211213-9a9fcc76.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-8.0gf_8xb64_in1k_20211208_103250.log.json) |
+| RegNetX-12GF | 46.11 | 12.15 | 79.67 | 95.03 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/regnet/regnetx-12gf_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-12gf_8xb64_in1k_20211213-5df8c2f8.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-12gf_8xb64_in1k_20211208_143713.log.json) |
+| RegNetX-400MF\* | 5.16 | 0.41 | 72.55 | 90.91 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/regnet/regnetx-400mf_8xb128_in1k) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/convert/RegNetX-400MF-0db9f35c.pth) |
+| RegNetX-800MF\* | 7.26 | 0.81 | 75.21 | 92.37 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/regnet/regnetx-800mf_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/convert/RegNetX-800MF-4f9d1e8a.pth) |
+| RegNetX-1.6GF\* | 9.19 | 1.63 | 77.04 | 93.51 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/regnet/regnetx-1.6gf_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/convert/RegNetX-1.6GF-cfb32375.pth) |
+| RegNetX-3.2GF\* | 15.3 | 3.21 | 78.26 | 94.20 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/regnet/regnetx-3.2gf_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/convert/RegNetX-3.2GF-82c43fd5.pth) |
+| RegNetX-4.0GF\* | 22.12 | 4.0 | 78.72 | 94.22 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/regnet/regnetx-4.0gf_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/convert/RegNetX-4.0GF-ef8bb32c.pth) |
+| RegNetX-6.4GF\* | 26.21 | 6.51 | 79.22 | 94.61 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/regnet/regnetx-6.4gf_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/convert/RegNetX-6.4GF-6888c0ea.pth) |
+| RegNetX-8.0GF\* | 39.57 | 8.03 | 79.31 | 94.57 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/regnet/regnetx-8.0gf_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/convert/RegNetX-8.0GF-cb4c77ec.pth) |
+| RegNetX-12GF\* | 46.11 | 12.15 | 79.91 | 94.78 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/regnet/regnetx-12gf_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/convert/RegNetX-12GF-0574538f.pth) |
+
+*Models with \* are converted from [pycls](https://github.com/facebookresearch/pycls/blob/master/MODEL_ZOO.md). The config files of these models are only for validation.*
+
+## Citation
+
+```bibtex
+@article{radosavovic2020designing,
+ title={Designing Network Design Spaces},
+ author={Ilija Radosavovic and Raj Prateek Kosaraju and Ross Girshick and Kaiming He and Piotr Dollár},
+ year={2020},
+ eprint={2003.13678},
+ archivePrefix={arXiv},
+ primaryClass={cs.CV}
+}
+```
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/regnet/metafile.yml b/openmmlab_test/mmclassification-0.24.1/configs/regnet/metafile.yml
new file mode 100644
index 00000000..6b301abb
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/regnet/metafile.yml
@@ -0,0 +1,122 @@
+Collections:
+ - Name: RegNet
+ Metadata:
+ Training Data: ImageNet-1k
+ Architecture:
+ - Neural Architecture Search
+ - Design Space Design
+ - Precise BN
+ - SGD with Nesterov
+ Paper:
+ URL: https://arxiv.org/abs/2003.13678
+ Title: Designing Network Design Spaces
+ README: configs/regnet/README.md
+ Code:
+ URL: https://github.com/open-mmlab/mmclassification/blob/v0.18.0/mmcls/models/backbones/regnet.py
+ Version: v0.18.0
+
+Models:
+ - Name: regnetx-400mf_8xb128_in1k
+ In Collection: RegNet
+ Config: configs/regnet/regnetx-400mf_8xb128_in1k.py
+ Metadata:
+ FLOPs: 410000000 # 0.41G
+ Parameters: 5160000 # 5.16M
+ Results:
+ - Dataset: ImageNet-1k
+ Task: Image Classification
+ Metrics:
+ Top 1 Accuracy: 72.56
+ Top 5 Accuracy: 90.78
+ Weights: https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-400mf_8xb128_in1k_20211213-89bfc226.pth
+ - Name: regnetx-800mf_8xb128_in1k
+ In Collection: RegNet
+ Config: configs/regnet/regnetx-800mf_8xb128_in1k.py
+ Metadata:
+ FLOPs: 810000000 # 0.81G
+ Parameters: 7260000 # 7.26M
+ Results:
+ - Dataset: ImageNet-1k
+ Task: Image Classification
+ Metrics:
+ Top 1 Accuracy: 74.76
+ Top 5 Accuracy: 92.32
+ Weights: https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-800mf_8xb128_in1k_20211213-222b0f11.pth
+ - Name: regnetx-1.6gf_8xb128_in1k
+ In Collection: RegNet
+ Config: configs/regnet/regnetx-1.6gf_8xb128_in1k.py
+ Metadata:
+ FLOPs: 1630000000 # 1.63G
+ Parameters: 9190000 # 9.19M
+ Results:
+ - Dataset: ImageNet-1k
+ Task: Image Classification
+ Metrics:
+ Top 1 Accuracy: 76.84
+ Top 5 Accuracy: 93.31
+ Weights: https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-1.6gf_8xb128_in1k_20211213-d1b89758.pth
+ - Name: regnetx-3.2gf_8xb64_in1k
+ In Collection: RegNet
+ Config: configs/regnet/regnetx-3.2gf_8xb64_in1k.py
+ Metadata:
+ FLOPs: 3210000000 # 3.21G
+ Parameters: 15300000 # 15.3M
+ Results:
+ - Dataset: ImageNet-1k
+ Task: Image Classification
+ Metrics:
+ Top 1 Accuracy: 78.09
+ Top 5 Accuracy: 94.08
+ Weights: https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-3.2gf_8xb64_in1k_20211213-1fdd82ae.pth
+ - Name: regnetx-4.0gf_8xb64_in1k
+ In Collection: RegNet
+ Config: configs/regnet/regnetx-4.0gf_8xb64_in1k.py
+ Metadata:
+ FLOPs: 4000000000 # 4G
+ Parameters: 22120000 # 22.12M
+ Results:
+ - Dataset: ImageNet-1k
+ Task: Image Classification
+ Metrics:
+ Top 1 Accuracy: 78.60
+ Top 5 Accuracy: 94.17
+ Weights: https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-4.0gf_8xb64_in1k_20211213-efed675c.pth
+ - Name: regnetx-6.4gf_8xb64_in1k
+ In Collection: RegNet
+ Config: configs/regnet/regnetx-6.4gf_8xb64_in1k.py
+ Metadata:
+ FLOPs: 6510000000 # 6.51G
+ Parameters: 26210000 # 26.21M
+ Results:
+ - Dataset: ImageNet-1k
+ Task: Image Classification
+ Metrics:
+ Top 1 Accuracy: 79.38
+ Top 5 Accuracy: 94.65
+ Weights: https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-6.4gf_8xb64_in1k_20211215-5c6089da.pth
+ - Name: regnetx-8.0gf_8xb64_in1k
+ In Collection: RegNet
+ Config: configs/regnet/regnetx-8.0gf_8xb64_in1k.py
+ Metadata:
+ FLOPs: 8030000000 # 8.03G
+ Parameters: 39570000 # 39.57M
+ Results:
+ - Dataset: ImageNet-1k
+ Task: Image Classification
+ Metrics:
+ Top 1 Accuracy: 79.12
+ Top 5 Accuracy: 94.51
+ Weights: https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-8.0gf_8xb64_in1k_20211213-9a9fcc76.pth
+ - Name: regnetx-12gf_8xb64_in1k
+ In Collection: RegNet
+ Config: configs/regnet/regnetx-12gf_8xb64_in1k.py
+ Metadata:
+ FLOPs: 12150000000 # 12.15G
+ Parameters: 46110000 # 46.11M
+ Results:
+ - Dataset: ImageNet-1k
+ Task: Image Classification
+ Metrics:
+ Top 1 Accuracy: 79.67
+ Top 5 Accuracy: 95.03
+ Weights: https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-12gf_8xb64_in1k_20211213-5df8c2f8.pth
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/regnet/regnetx-1.6gf_8xb128_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/regnet/regnetx-1.6gf_8xb128_in1k.py
new file mode 100644
index 00000000..d3e9e934
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/regnet/regnetx-1.6gf_8xb128_in1k.py
@@ -0,0 +1,6 @@
+_base_ = ['./regnetx-400mf_8xb128_in1k.py']
+
+# model settings
+model = dict(
+ backbone=dict(type='RegNet', arch='regnetx_1.6gf'),
+ head=dict(in_channels=912, ))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/regnet/regnetx-12gf_8xb64_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/regnet/regnetx-12gf_8xb64_in1k.py
new file mode 100644
index 00000000..5da0ebe9
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/regnet/regnetx-12gf_8xb64_in1k.py
@@ -0,0 +1,11 @@
+_base_ = ['./regnetx-400mf_8xb128_in1k.py']
+
+# model settings
+model = dict(
+ backbone=dict(type='RegNet', arch='regnetx_12gf'),
+ head=dict(in_channels=2240, ))
+
+# for batch_size 512, use lr = 0.4
+optimizer = dict(lr=0.4)
+
+data = dict(samples_per_gpu=64, )
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/regnet/regnetx-3.2gf_8xb64_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/regnet/regnetx-3.2gf_8xb64_in1k.py
new file mode 100644
index 00000000..98c4a0b3
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/regnet/regnetx-3.2gf_8xb64_in1k.py
@@ -0,0 +1,11 @@
+_base_ = ['./regnetx-400mf_8xb128_in1k.py']
+
+# model settings
+model = dict(
+ backbone=dict(type='RegNet', arch='regnetx_3.2gf'),
+ head=dict(in_channels=1008, ))
+
+# for batch_size 512, use lr = 0.4
+optimizer = dict(lr=0.4)
+
+data = dict(samples_per_gpu=64, )
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/regnet/regnetx-4.0gf_8xb64_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/regnet/regnetx-4.0gf_8xb64_in1k.py
new file mode 100644
index 00000000..87bc8470
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/regnet/regnetx-4.0gf_8xb64_in1k.py
@@ -0,0 +1,11 @@
+_base_ = ['./regnetx-400mf_8xb128_in1k.py']
+
+# model settings
+model = dict(
+ backbone=dict(type='RegNet', arch='regnetx_4.0gf'),
+ head=dict(in_channels=1360, ))
+
+# for batch_size 512, use lr = 0.4
+optimizer = dict(lr=0.4)
+
+data = dict(samples_per_gpu=64, )
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/regnet/regnetx-400mf_8xb128_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/regnet/regnetx-400mf_8xb128_in1k.py
new file mode 100644
index 00000000..86fee908
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/regnet/regnetx-400mf_8xb128_in1k.py
@@ -0,0 +1,77 @@
+_base_ = [
+ '../_base_/models/regnet/regnetx_400mf.py',
+ '../_base_/datasets/imagenet_bs32.py',
+ '../_base_/schedules/imagenet_bs1024_coslr.py',
+ '../_base_/default_runtime.py'
+]
+
+# The Precise BN hook updates the BN stats, so it should be executed before
+# CheckpointHook, which has a priority of 'NORMAL'. Hence the priority of
+# PreciseBNHook is set to 'ABOVE_NORMAL' here.
+custom_hooks = [
+ dict(
+ type='PreciseBNHook',
+ num_samples=8192,
+ interval=1,
+ priority='ABOVE_NORMAL')
+]
+
+# SGD with Nesterov; the base lr is 0.8 for batch_size 1024,
+# 0.4 for batch_size 512 and 0.2 for batch_size 256 when training on ImageNet-1k
+optimizer = dict(lr=0.8, nesterov=True)
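+# (equivalently, the linear scaling rule: lr = 0.8 * total_batch_size / 1024)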
+
+# dataset settings
+dataset_type = 'ImageNet'
+
+# normalization params, in order of BGR
+NORM_MEAN = [103.53, 116.28, 123.675]
+NORM_STD = [57.375, 57.12, 58.395]
+
+# lighting params, in order of RGB, from the pycls repo
+EIGVAL = [0.2175, 0.0188, 0.0045]
+EIGVEC = [[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.814],
+ [-0.5836, -0.6948, 0.4203]]
+
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='RandomResizedCrop', size=224),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(
+ type='Lighting',
+ eigval=EIGVAL,
+ eigvec=EIGVEC,
+ alphastd=25.5, # because the value range of images is [0,255]
+ to_rgb=True
+ ), # BGR image from cv2 in LoadImageFromFile, convert to RGB here
+ dict(type='Normalize', mean=NORM_MEAN, std=NORM_STD,
+ to_rgb=True), # RGB2BGR
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='Resize', size=(256, -1)),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', mean=NORM_MEAN, std=NORM_STD, to_rgb=False),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=128,
+ workers_per_gpu=8,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/train',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ # replace `data/val` with `data/test` for standard test
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/regnet/regnetx-6.4gf_8xb64_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/regnet/regnetx-6.4gf_8xb64_in1k.py
new file mode 100644
index 00000000..02ee424b
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/regnet/regnetx-6.4gf_8xb64_in1k.py
@@ -0,0 +1,11 @@
+_base_ = ['./regnetx-400mf_8xb128_in1k.py']
+
+# model settings
+model = dict(
+ backbone=dict(type='RegNet', arch='regnetx_6.4gf'),
+ head=dict(in_channels=1624, ))
+
+# for batch_size 512, use lr = 0.4
+optimizer = dict(lr=0.4)
+
+data = dict(samples_per_gpu=64, )
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/regnet/regnetx-8.0gf_8xb64_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/regnet/regnetx-8.0gf_8xb64_in1k.py
new file mode 100644
index 00000000..84ab8114
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/regnet/regnetx-8.0gf_8xb64_in1k.py
@@ -0,0 +1,11 @@
+_base_ = ['./regnetx-400mf_8xb128_in1k.py']
+
+# model settings
+model = dict(
+ backbone=dict(type='RegNet', arch='regnetx_8.0gf'),
+ head=dict(in_channels=1920, ))
+
+# for batch_size 512, use lr = 0.4
+optimizer = dict(lr=0.4)
+
+data = dict(samples_per_gpu=64, )
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/regnet/regnetx-800mf_8xb128_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/regnet/regnetx-800mf_8xb128_in1k.py
new file mode 100644
index 00000000..9cd71379
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/regnet/regnetx-800mf_8xb128_in1k.py
@@ -0,0 +1,6 @@
+_base_ = ['./regnetx-400mf_8xb128_in1k.py']
+
+# model settings
+model = dict(
+ backbone=dict(type='RegNet', arch='regnetx_800mf'),
+ head=dict(in_channels=672, ))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/repmlp/README.md b/openmmlab_test/mmclassification-0.24.1/configs/repmlp/README.md
new file mode 100644
index 00000000..45334635
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/repmlp/README.md
@@ -0,0 +1,93 @@
+# RepMLP
+
+> [RepMLP: Re-parameterizing Convolutions into Fully-connected Layers for Image Recognition](https://arxiv.org/abs/2105.01883)
+
+
+
+## Abstract
+
+We propose RepMLP, a multi-layer-perceptron-style neural network building block for image recognition, which is composed of a series of fully-connected (FC) layers. Compared to convolutional layers, FC layers are more efficient, better at modeling the long-range dependencies and positional patterns, but worse at capturing the local structures, hence usually less favored for image recognition. We propose a structural re-parameterization technique that adds local prior into an FC to make it powerful for image recognition. Specifically, we construct convolutional layers inside a RepMLP during training and merge them into the FC for inference. On CIFAR, a simple pure-MLP model shows performance very close to CNN. By inserting RepMLP in traditional CNN, we improve ResNets by 1.8% accuracy on ImageNet, 2.9% for face recognition, and 2.3% mIoU on Cityscapes with lower FLOPs. Our intriguing findings highlight that combining the global representational capacity and positional perception of FC with the local prior of convolution can improve the performance of neural network with faster speed on both the tasks with translation invariance (e.g., semantic segmentation) and those with aligned images and positional patterns (e.g., face recognition).
+
+
+
+
+
+
+## Results and models
+
+### ImageNet-1k
+
+| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+| :-----------: | :-------: | :------: | :-------: | :-------: | :-------------------------------------------------------------------------: | :---------------------------------------------------------------------------: |
+| RepMLP-B224\* | 68.24 | 6.71 | 80.41 | 95.12 | [train_cfg](https://github.com/open-mmlab/mmclassification/blob/master/configs/repmlp/repmlp-base_8xb64_in1k.py) \| [deploy_cfg](https://github.com/open-mmlab/mmclassification/blob/master/configs/repmlp/repmlp-base_delopy_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repmlp/repmlp-base_3rdparty_8xb64_in1k_20220330-1cb1f11b.pth) |
+| RepMLP-B256\* | 96.45 | 9.69 | 81.11 | 95.50 | [train_cfg](https://github.com/open-mmlab/mmclassification/blob/master/configs/repmlp/repmlp-base_8xb64_in1k-256px.py) \| [deploy_cfg](https://github.com/open-mmlab/mmclassification/blob/master/configs/repmlp/repmlp-base_deploy_8xb64_in1k-256px.py) | [model](https://download.openmmlab.com/mmclassification/v0/repmlp/repmlp-base_3rdparty_8xb64_in1k-256px_20220330-7c5a91ce.pth) |
+
+*Models with * are converted from the [official repo](https://github.com/DingXiaoH/RepMLP). The config files of these models are only for validation; we cannot guarantee their training accuracy, and you are welcome to contribute your reproduction results.*
+
+## How to use
+
+The checkpoints provided are all `training-time` models. Use the reparameterize tool to switch them to the more efficient `inference-time` architecture, which has both fewer parameters and a lower computational cost.
+
+### Use tool
+
+Use the provided tool to reparameterize the given model and save the checkpoint:
+
+```bash
+python tools/convert_models/reparameterize_model.py ${CFG_PATH} ${SRC_CKPT_PATH} ${TARGET_CKPT_PATH}
+```
+
+`${CFG_PATH}` is the path of the config file, `${SRC_CKPT_PATH}` is the path of the source checkpoint file, and `${TARGET_CKPT_PATH}` is the path where the deploy weights will be saved.
+
+To use the reparameterized weights, you must switch to the corresponding deploy config file.
+
+```bash
+python tools/test.py ${Deploy_CFG} ${Deploy_Checkpoint} --metrics accuracy
+```
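+
+For example, converting and then testing the RepMLP-B256 checkpoint from the table above might look like this (the local checkpoint file names are illustrative):
+
+```bash
+python tools/convert_models/reparameterize_model.py \
+    configs/repmlp/repmlp-base_8xb64_in1k-256px.py \
+    repmlp-base_3rdparty_8xb64_in1k-256px_20220330-7c5a91ce.pth \
+    repmlp-base-256px_deploy.pth
+python tools/test.py \
+    configs/repmlp/repmlp-base_deploy_8xb64_in1k-256px.py \
+    repmlp-base-256px_deploy.pth \
+    --metrics accuracy
+```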
+
+### In the code
+
+Use `backbone.switch_to_deploy()` or `classifier.backbone.switch_to_deploy()` to switch the model to the deploy mode. For example:
+
+```python
+from mmcls.models import build_backbone
+
+backbone_cfg = dict(type='RepMLPNet', arch='B', img_size=224, reparam_conv_kernels=(1, 3), deploy=False)
+backbone = build_backbone(backbone_cfg)
+backbone.switch_to_deploy()
+```
+
+or
+
+```python
+from mmcls.models import build_classifier
+
+cfg = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='RepMLPNet',
+ arch='B',
+ img_size=224,
+ reparam_conv_kernels=(1, 3),
+ deploy=False),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=768,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
+
+classifier = build_classifier(cfg)
+classifier.backbone.switch_to_deploy()
+```
+
+## Citation
+
+```
+@article{ding2021repmlp,
+ title={Repmlp: Re-parameterizing convolutions into fully-connected layers for image recognition},
+ author={Ding, Xiaohan and Xia, Chunlong and Zhang, Xiangyu and Chu, Xiaojie and Han, Jungong and Ding, Guiguang},
+ journal={arXiv preprint arXiv:2105.01883},
+ year={2021}
+}
+```
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/repmlp/metafile.yml b/openmmlab_test/mmclassification-0.24.1/configs/repmlp/metafile.yml
new file mode 100644
index 00000000..19caecbe
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/repmlp/metafile.yml
@@ -0,0 +1,48 @@
+Collections:
+ - Name: RepMLP
+ Metadata:
+ Training Data: ImageNet-1k
+ Architecture:
+ - Multi-layer Perceptron
+ - Re-parameterization Convolution
+ Paper:
+ URL: https://arxiv.org/abs/2105.01883
+ Title: 'RepMLP: Re-parameterizing Convolutions into Fully-connected Layers for Image Recognition'
+ README: configs/repmlp/README.md
+ Code:
+ URL: https://github.com/open-mmlab/mmclassification/blob/v0.21.0/mmcls/models/backbones/repmlp.py
+ Version: v0.21.0
+
+Models:
+ - Name: repmlp-base_3rdparty_8xb64_in1k
+ In Collection: RepMLP
+ Config: configs/repmlp/repmlp-base_8xb64_in1k.py
+ Metadata:
+ FLOPs: 6710000000 # 6.71 G
+ Parameters: 68240000 # 68.24 M
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 80.41
+ Top 5 Accuracy: 95.14
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/repmlp/repmlp-base_3rdparty_8xb64_in1k_20220330-1cb1f11b.pth
+ Converted From:
+ Weights: https://github.com/DingXiaoH/RepMLP
+ Code: https://github.com/DingXiaoH/RepMLP/blob/072d8516beba83d75dfe6ebb12f625abad4b53d5/repmlpnet.py#L274
+  - Name: repmlp-base_3rdparty_8xb64_in1k-256px
+ In Collection: RepMLP
+ Config: configs/repmlp/repmlp-base_8xb64_in1k-256px.py
+ Metadata:
+ FLOPs: 9690000000 # 9.69 G
+ Parameters: 96450000 # 96.45M
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 81.11
+ Top 5 Accuracy: 95.50
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/repmlp/repmlp-base_3rdparty_8xb64_in1k-256px_20220330-7c5a91ce.pth
+ Converted From:
+ Weights: https://github.com/DingXiaoH/RepMLP
+ Code: https://github.com/DingXiaoH/RepMLP/blob/072d8516beba83d75dfe6ebb12f625abad4b53d5/repmlpnet.py#L278
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/repmlp/repmlp-base_8xb64_in1k-256px.py b/openmmlab_test/mmclassification-0.24.1/configs/repmlp/repmlp-base_8xb64_in1k-256px.py
new file mode 100644
index 00000000..ff03c6f9
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/repmlp/repmlp-base_8xb64_in1k-256px.py
@@ -0,0 +1,21 @@
+_base_ = [
+ '../_base_/models/repmlp-base_224.py',
+ '../_base_/datasets/imagenet_bs64_mixer_224.py',
+ '../_base_/schedules/imagenet_bs4096_AdamW.py',
+ '../_base_/default_runtime.py'
+]
+
+model = dict(backbone=dict(img_size=256))
+
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+    dict(type='Resize', size=(256 * 256 // 224, -1), backend='pillow'),  # shorter edge to 292 (= 256 / 224 * 256)
+ dict(type='CenterCrop', crop_size=256),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/repmlp/repmlp-base_8xb64_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/repmlp/repmlp-base_8xb64_in1k.py
new file mode 100644
index 00000000..430cdc0c
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/repmlp/repmlp-base_8xb64_in1k.py
@@ -0,0 +1,20 @@
+_base_ = [
+ '../_base_/models/repmlp-base_224.py',
+ '../_base_/datasets/imagenet_bs64_pil_resize.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py'
+]
+
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+    # resize to (256, 256) here, which is different from resizing the shorter edge to 256
+ dict(type='Resize', size=(256, 256), backend='pillow'),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/repmlp/repmlp-base_delopy_8xb64_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/repmlp/repmlp-base_delopy_8xb64_in1k.py
new file mode 100644
index 00000000..b5b2c882
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/repmlp/repmlp-base_delopy_8xb64_in1k.py
@@ -0,0 +1,3 @@
+_base_ = ['./repmlp-base_8xb64_in1k.py']
+
+model = dict(backbone=dict(deploy=True))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/repmlp/repmlp-base_deploy_8xb64_in1k-256px.py b/openmmlab_test/mmclassification-0.24.1/configs/repmlp/repmlp-base_deploy_8xb64_in1k-256px.py
new file mode 100644
index 00000000..27ff50a0
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/repmlp/repmlp-base_deploy_8xb64_in1k-256px.py
@@ -0,0 +1,3 @@
+_base_ = ['./repmlp-base_8xb64_in1k-256px.py']
+
+model = dict(backbone=dict(deploy=True))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/repvgg/README.md b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/README.md
new file mode 100644
index 00000000..b9341326
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/README.md
@@ -0,0 +1,101 @@
+# RepVGG
+
+> [RepVGG: Making VGG-style ConvNets Great Again](https://arxiv.org/abs/2101.03697)
+
+
+
+## Abstract
+
+We present a simple but powerful architecture of convolutional neural network, which has a VGG-like inference-time body composed of nothing but a stack of 3x3 convolution and ReLU, while the training-time model has a multi-branch topology. Such decoupling of the training-time and inference-time architecture is realized by a structural re-parameterization technique so that the model is named RepVGG. On ImageNet, RepVGG reaches over 80% top-1 accuracy, which is the first time for a plain model, to the best of our knowledge. On NVIDIA 1080Ti GPU, RepVGG models run 83% faster than ResNet-50 or 101% faster than ResNet-101 with higher accuracy and show favorable accuracy-speed trade-off compared to the state-of-the-art models like EfficientNet and RegNet.
+
+
+
+
+
+
+## Results and models
+
+### ImageNet-1k
+
+| Model | Epochs | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+| :-----------: | :----: | :-------------------------------: | :-----------------------------: | :-------: | :-------: | :----------------------------------------------: | :-------------------------------------------------: |
+| RepVGG-A0\* | 120 | 9.11 (train) \| 8.31 (deploy) | 1.52 (train) \| 1.36 (deploy) | 72.41 | 90.50 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-A0_4xb64-coslr-120e_in1k.py) \| [config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-A0_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-A0_3rdparty_4xb64-coslr-120e_in1k_20210909-883ab98c.pth) |
+| RepVGG-A1\* | 120 | 14.09 (train) \| 12.79 (deploy) | 2.64 (train) \| 2.37 (deploy) | 74.47 | 91.85 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-A1_4xb64-coslr-120e_in1k.py) \| [config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-A1_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-A1_3rdparty_4xb64-coslr-120e_in1k_20210909-24003a24.pth) |
+| RepVGG-A2\* | 120 | 28.21 (train) \| 25.5 (deploy) | 5.7 (train) \| 5.12 (deploy) | 76.48 | 93.01 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-A2_4xb64-coslr-120e_in1k.py) \|[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-A2_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-A2_3rdparty_4xb64-coslr-120e_in1k_20210909-97d7695a.pth) |
+| RepVGG-B0\* | 120 | 15.82 (train) \| 14.34 (deploy) | 3.42 (train) \| 3.06 (deploy) | 75.14 | 92.42 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B0_4xb64-coslr-120e_in1k.py) \|[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B0_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B0_3rdparty_4xb64-coslr-120e_in1k_20210909-446375f4.pth) |
+| RepVGG-B1\* | 120 | 57.42 (train) \| 51.83 (deploy) | 13.16 (train) \| 11.82 (deploy) | 78.37 | 94.11 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B1_4xb64-coslr-120e_in1k.py) \|[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B1_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B1_3rdparty_4xb64-coslr-120e_in1k_20210909-750cdf67.pth) |
+| RepVGG-B1g2\* | 120 | 45.78 (train) \| 41.36 (deploy) | 9.82 (train) \| 8.82 (deploy) | 77.79 | 93.88 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B1g2_4xb64-coslr-120e_in1k.py) \|[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B1g2_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B1g2_3rdparty_4xb64-coslr-120e_in1k_20210909-344f6422.pth) |
+| RepVGG-B1g4\* | 120 | 39.97 (train) \| 36.13 (deploy) | 8.15 (train) \| 7.32 (deploy) | 77.58 | 93.84 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B1g4_4xb64-coslr-120e_in1k.py) \|[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B1g4_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B1g4_3rdparty_4xb64-coslr-120e_in1k_20210909-d4c1a642.pth) |
+| RepVGG-B2\* | 120 | 89.02 (train) \| 80.32 (deploy) | 20.46 (train) \| 18.39 (deploy) | 78.78 | 94.42 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B2_4xb64-coslr-120e_in1k.py) \|[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B2_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B2_3rdparty_4xb64-coslr-120e_in1k_20210909-bd6b937c.pth) |
+| RepVGG-B2g4\* | 200 | 61.76 (train) \| 55.78 (deploy) | 12.63 (train) \| 11.34 (deploy) | 79.38 | 94.68 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B2g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) \|[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B2g4_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B2g4_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k_20210909-7b7955f0.pth) |
+| RepVGG-B3\* | 200 | 123.09 (train) \| 110.96 (deploy) | 29.17 (train) \| 26.22 (deploy) | 80.52 | 95.26 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B3_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) \|[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B3_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B3_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k_20210909-dda968bf.pth) |
+| RepVGG-B3g4\* | 200 | 83.83 (train) \| 75.63 (deploy) | 17.9 (train) \| 16.08 (deploy) | 80.22 | 95.10 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B3g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) \|[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B3g4_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B3g4_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k_20210909-4e54846a.pth) |
+| RepVGG-D2se\* | 200 | 133.33 (train) \| 120.39 (deploy) | 36.56 (train) \| 32.85 (deploy) | 81.81 | 95.94 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-D2se_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) \|[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-D2se_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-D2se_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k_20210909-cf3139b7.pth) |
+
+*Models with * are converted from the [official repo](https://github.com/DingXiaoH/RepVGG). The config files of these models are only for validation; we cannot guarantee their training accuracy, and you are welcome to contribute your reproduction results.*
+
+## How to use
+
+The checkpoints provided are all `training-time` models. Use the reparameterize tool to switch them to the more efficient `inference-time` architecture, which has both fewer parameters and a lower computational cost.
+
+### Use tool
+
+Use the provided tool to reparameterize the given model and save the checkpoint:
+
+```bash
+python tools/convert_models/reparameterize_model.py ${CFG_PATH} ${SRC_CKPT_PATH} ${TARGET_CKPT_PATH}
+```
+
+`${CFG_PATH}` is the path of the config file, `${SRC_CKPT_PATH}` is the path of the source checkpoint file, and `${TARGET_CKPT_PATH}` is the path where the deploy weights will be saved.
+
+To use the reparameterized weights, you must switch to the corresponding deploy config file.
+
+```bash
+python tools/test.py ${Deploy_CFG} ${Deploy_Checkpoint} --metrics accuracy
+```
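+
+For example, a reparameterized RepVGG-A0 checkpoint could be evaluated like this (the local checkpoint file name is illustrative):
+
+```bash
+python tools/test.py \
+    configs/repvgg/deploy/repvgg-A0_deploy_4xb64-coslr-120e_in1k.py \
+    repvgg-A0_deploy.pth \
+    --metrics accuracy
+```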
+
+### In the code
+
+Use `backbone.switch_to_deploy()` or `classifier.backbone.switch_to_deploy()` to switch the model to the deploy mode. For example:
+
+```python
+from mmcls.models import build_backbone
+
+backbone_cfg = dict(type='RepVGG', arch='A0')
+backbone = build_backbone(backbone_cfg)
+backbone.switch_to_deploy()
+```
+
+or
+
+```python
+from mmcls.models import build_classifier
+
+cfg = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='RepVGG',
+ arch='A0'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=1280,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
+
+classifier = build_classifier(cfg)
+classifier.backbone.switch_to_deploy()
+```
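+
+To sanity-check that re-parameterization preserves the outputs, you can compare the backbone features before and after the switch. The snippet below is only a sketch; it assumes a 224x224 input and the default backbone settings:
+
+```python
+import torch
+
+from mmcls.models import build_backbone
+
+backbone = build_backbone(dict(type='RepVGG', arch='A0'))
+backbone.eval()  # use running BN statistics, as the fused branches do
+
+x = torch.randn(1, 3, 224, 224)
+with torch.no_grad():
+    out_train = backbone(x)  # multi-branch, training-time architecture
+    backbone.switch_to_deploy()
+    out_deploy = backbone(x)  # single-branch, inference-time architecture
+
+# the two outputs should agree up to floating-point error
+assert torch.allclose(out_train[-1], out_deploy[-1], atol=1e-5)
+```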
+
+## Citation
+
+```
+@inproceedings{ding2021repvgg,
+ title={Repvgg: Making vgg-style convnets great again},
+ author={Ding, Xiaohan and Zhang, Xiangyu and Ma, Ningning and Han, Jungong and Ding, Guiguang and Sun, Jian},
+ booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
+ pages={13733--13742},
+ year={2021}
+}
+```
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-A0_deploy_4xb64-coslr-120e_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-A0_deploy_4xb64-coslr-120e_in1k.py
new file mode 100644
index 00000000..20787f28
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-A0_deploy_4xb64-coslr-120e_in1k.py
@@ -0,0 +1,3 @@
+_base_ = '../repvgg-A0_4xb64-coslr-120e_in1k.py'
+
+model = dict(backbone=dict(deploy=True))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-A1_deploy_4xb64-coslr-120e_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-A1_deploy_4xb64-coslr-120e_in1k.py
new file mode 100644
index 00000000..eea0da9c
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-A1_deploy_4xb64-coslr-120e_in1k.py
@@ -0,0 +1,3 @@
+_base_ = '../repvgg-A1_4xb64-coslr-120e_in1k.py'
+
+model = dict(backbone=dict(deploy=True))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-A2_deploy_4xb64-coslr-120e_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-A2_deploy_4xb64-coslr-120e_in1k.py
new file mode 100644
index 00000000..7b0cea7b
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-A2_deploy_4xb64-coslr-120e_in1k.py
@@ -0,0 +1,3 @@
+_base_ = '../repvgg-A2_4xb64-coslr-120e_in1k.py'
+
+model = dict(backbone=dict(deploy=True))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-B0_deploy_4xb64-coslr-120e_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-B0_deploy_4xb64-coslr-120e_in1k.py
new file mode 100644
index 00000000..23a2898a
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-B0_deploy_4xb64-coslr-120e_in1k.py
@@ -0,0 +1,3 @@
+_base_ = '../repvgg-B0_4xb64-coslr-120e_in1k.py'
+
+model = dict(backbone=dict(deploy=True))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-B1_deploy_4xb64-coslr-120e_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-B1_deploy_4xb64-coslr-120e_in1k.py
new file mode 100644
index 00000000..24355eda
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-B1_deploy_4xb64-coslr-120e_in1k.py
@@ -0,0 +1,3 @@
+_base_ = '../repvgg-B1_4xb64-coslr-120e_in1k.py'
+
+model = dict(backbone=dict(deploy=True))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-B1g2_deploy_4xb64-coslr-120e_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-B1g2_deploy_4xb64-coslr-120e_in1k.py
new file mode 100644
index 00000000..579fcc47
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-B1g2_deploy_4xb64-coslr-120e_in1k.py
@@ -0,0 +1,3 @@
+_base_ = '../repvgg-B1g2_4xb64-coslr-120e_in1k.py'
+
+model = dict(backbone=dict(deploy=True))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-B1g4_deploy_4xb64-coslr-120e_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-B1g4_deploy_4xb64-coslr-120e_in1k.py
new file mode 100644
index 00000000..eab5d440
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-B1g4_deploy_4xb64-coslr-120e_in1k.py
@@ -0,0 +1,3 @@
+_base_ = '../repvgg-B1g4_4xb64-coslr-120e_in1k.py'
+
+model = dict(backbone=dict(deploy=True))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-B2_deploy_4xb64-coslr-120e_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-B2_deploy_4xb64-coslr-120e_in1k.py
new file mode 100644
index 00000000..0681f14d
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-B2_deploy_4xb64-coslr-120e_in1k.py
@@ -0,0 +1,3 @@
+_base_ = '../repvgg-B2_4xb64-coslr-120e_in1k.py'
+
+model = dict(backbone=dict(deploy=True))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-B2g4_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-B2g4_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
new file mode 100644
index 00000000..8f184014
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-B2g4_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
@@ -0,0 +1,3 @@
+_base_ = '../repvgg-B2g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py'
+
+model = dict(backbone=dict(deploy=True))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-B3_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-B3_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
new file mode 100644
index 00000000..e60b0678
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-B3_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
@@ -0,0 +1,3 @@
+_base_ = '../repvgg-B3_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py'
+
+model = dict(backbone=dict(deploy=True))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-B3g4_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-B3g4_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
new file mode 100644
index 00000000..46f18778
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-B3g4_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
@@ -0,0 +1,3 @@
+_base_ = '../repvgg-B3g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py'
+
+model = dict(backbone=dict(deploy=True))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-D2se_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-D2se_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
new file mode 100644
index 00000000..66dff3b6
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/deploy/repvgg-D2se_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
@@ -0,0 +1,3 @@
+_base_ = '../repvgg-D2se_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py'
+
+model = dict(backbone=dict(deploy=True))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/repvgg/metafile.yml b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/metafile.yml
new file mode 100644
index 00000000..84fee591
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/metafile.yml
@@ -0,0 +1,208 @@
+Collections:
+ - Name: RepVGG
+ Metadata:
+ Training Data: ImageNet-1k
+ Architecture:
+      - Re-parameterization Convolution
+ - VGG-style Neural Network
+ Paper:
+ URL: https://arxiv.org/abs/2101.03697
+ Title: 'RepVGG: Making VGG-style ConvNets Great Again'
+ README: configs/repvgg/README.md
+ Code:
+ URL: https://github.com/open-mmlab/mmclassification/blob/v0.16.0/mmcls/models/backbones/repvgg.py#L257
+ Version: v0.16.0
+
+Models:
+ - Name: repvgg-A0_3rdparty_4xb64-coslr-120e_in1k
+ In Collection: RepVGG
+ Config: configs/repvgg/repvgg-A0_4xb64-coslr-120e_in1k.py
+ Metadata:
+ FLOPs: 1520000000
+ Parameters: 9110000
+ Results:
+ - Dataset: ImageNet-1k
+ Task: Image Classification
+ Metrics:
+ Top 1 Accuracy: 72.41
+ Top 5 Accuracy: 90.50
+ Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-A0_3rdparty_4xb64-coslr-120e_in1k_20210909-883ab98c.pth
+ Converted From:
+ Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq
+ Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L196
+ - Name: repvgg-A1_3rdparty_4xb64-coslr-120e_in1k
+ In Collection: RepVGG
+ Config: configs/repvgg/repvgg-A1_4xb64-coslr-120e_in1k.py
+ Metadata:
+ FLOPs: 2640000000
+ Parameters: 14090000
+ Results:
+ - Dataset: ImageNet-1k
+ Task: Image Classification
+ Metrics:
+ Top 1 Accuracy: 74.47
+ Top 5 Accuracy: 91.85
+ Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-A1_3rdparty_4xb64-coslr-120e_in1k_20210909-24003a24.pth
+ Converted From:
+ Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq
+ Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L200
+ - Name: repvgg-A2_3rdparty_4xb64-coslr-120e_in1k
+ In Collection: RepVGG
+ Config: configs/repvgg/repvgg-A2_4xb64-coslr-120e_in1k.py
+ Metadata:
+      FLOPs: 5700000000
+      Parameters: 28210000
+ Results:
+ - Dataset: ImageNet-1k
+ Task: Image Classification
+ Metrics:
+ Top 1 Accuracy: 76.48
+ Top 5 Accuracy: 93.01
+ Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-A2_3rdparty_4xb64-coslr-120e_in1k_20210909-97d7695a.pth
+ Converted From:
+ Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq
+ Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L204
+ - Name: repvgg-B0_3rdparty_4xb64-coslr-120e_in1k
+ In Collection: RepVGG
+ Config: configs/repvgg/repvgg-B0_4xb64-coslr-120e_in1k.py
+ Metadata:
+      FLOPs: 3420000000
+      Parameters: 15820000
+ Results:
+ - Dataset: ImageNet-1k
+ Task: Image Classification
+ Metrics:
+ Top 1 Accuracy: 75.14
+ Top 5 Accuracy: 92.42
+ Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B0_3rdparty_4xb64-coslr-120e_in1k_20210909-446375f4.pth
+ Converted From:
+ Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq
+ Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L208
+ - Name: repvgg-B1_3rdparty_4xb64-coslr-120e_in1k
+ In Collection: RepVGG
+ Config: configs/repvgg/repvgg-B1_4xb64-coslr-120e_in1k.py
+ Metadata:
+      FLOPs: 13160000000
+      Parameters: 57420000
+ Results:
+ - Dataset: ImageNet-1k
+ Task: Image Classification
+ Metrics:
+ Top 1 Accuracy: 78.37
+ Top 5 Accuracy: 94.11
+ Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B1_3rdparty_4xb64-coslr-120e_in1k_20210909-750cdf67.pth
+ Converted From:
+ Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq
+ Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L212
+ - Name: repvgg-B1g2_3rdparty_4xb64-coslr-120e_in1k
+ In Collection: RepVGG
+ Config: configs/repvgg/repvgg-B1g2_4xb64-coslr-120e_in1k.py
+ Metadata:
+      FLOPs: 9820000000
+      Parameters: 45780000
+ Results:
+ - Dataset: ImageNet-1k
+ Task: Image Classification
+ Metrics:
+ Top 1 Accuracy: 77.79
+ Top 5 Accuracy: 93.88
+ Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B1g2_3rdparty_4xb64-coslr-120e_in1k_20210909-344f6422.pth
+ Converted From:
+ Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq
+ Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L216
+ - Name: repvgg-B1g4_3rdparty_4xb64-coslr-120e_in1k
+ In Collection: RepVGG
+ Config: configs/repvgg/repvgg-B1g4_4xb64-coslr-120e_in1k.py
+ Metadata:
+      FLOPs: 8150000000
+      Parameters: 39970000
+ Results:
+ - Dataset: ImageNet-1k
+ Task: Image Classification
+ Metrics:
+ Top 1 Accuracy: 77.58
+ Top 5 Accuracy: 93.84
+ Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B1g4_3rdparty_4xb64-coslr-120e_in1k_20210909-d4c1a642.pth
+ Converted From:
+ Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq
+ Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L220
+ - Name: repvgg-B2_3rdparty_4xb64-coslr-120e_in1k
+ In Collection: RepVGG
+ Config: configs/repvgg/repvgg-B2_4xb64-coslr-120e_in1k.py
+ Metadata:
+      FLOPs: 20460000000
+      Parameters: 89020000
+ Results:
+ - Dataset: ImageNet-1k
+ Task: Image Classification
+ Metrics:
+ Top 1 Accuracy: 78.78
+ Top 5 Accuracy: 94.42
+ Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B2_3rdparty_4xb64-coslr-120e_in1k_20210909-bd6b937c.pth
+ Converted From:
+ Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq
+ Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L225
+ - Name: repvgg-B2g4_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k
+ In Collection: RepVGG
+ Config: configs/repvgg/repvgg-B2g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
+ Metadata:
+      FLOPs: 12630000000
+      Parameters: 61760000
+ Results:
+ - Dataset: ImageNet-1k
+ Task: Image Classification
+ Metrics:
+ Top 1 Accuracy: 79.38
+ Top 5 Accuracy: 94.68
+ Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B2g4_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k_20210909-7b7955f0.pth
+ Converted From:
+ Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq
+ Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L229
+ - Name: repvgg-B3_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k
+ In Collection: RepVGG
+ Config: configs/repvgg/repvgg-B3_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
+ Metadata:
+      FLOPs: 29170000000
+      Parameters: 123090000
+ Results:
+ - Dataset: ImageNet-1k
+ Task: Image Classification
+ Metrics:
+ Top 1 Accuracy: 80.52
+ Top 5 Accuracy: 95.26
+ Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B3_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k_20210909-dda968bf.pth
+ Converted From:
+ Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq
+ Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L238
+ - Name: repvgg-B3g4_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k
+ In Collection: RepVGG
+ Config: configs/repvgg/repvgg-B3g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
+ Metadata:
+      FLOPs: 17900000000
+      Parameters: 83830000
+ Results:
+ - Dataset: ImageNet-1k
+ Task: Image Classification
+ Metrics:
+ Top 1 Accuracy: 80.22
+ Top 5 Accuracy: 95.10
+ Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B3g4_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k_20210909-4e54846a.pth
+ Converted From:
+ Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq
+ Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L238
+ - Name: repvgg-D2se_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k
+ In Collection: RepVGG
+ Config: configs/repvgg/repvgg-D2se_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
+ Metadata:
+      FLOPs: 36560000000
+      Parameters: 133330000
+ Results:
+ - Dataset: ImageNet-1k
+ Task: Image Classification
+ Metrics:
+ Top 1 Accuracy: 81.81
+ Top 5 Accuracy: 95.94
+ Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-D2se_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k_20210909-cf3139b7.pth
+ Converted From:
+ Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq
+ Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L250
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-A0_4xb64-coslr-120e_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-A0_4xb64-coslr-120e_in1k.py
new file mode 100644
index 00000000..a7fd3bbe
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-A0_4xb64-coslr-120e_in1k.py
@@ -0,0 +1,8 @@
+_base_ = [
+ '../_base_/models/repvgg-A0_in1k.py',
+ '../_base_/datasets/imagenet_bs64_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256_coslr.py',
+ '../_base_/default_runtime.py'
+]
+
+runner = dict(max_epochs=120)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-A1_4xb64-coslr-120e_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-A1_4xb64-coslr-120e_in1k.py
new file mode 100644
index 00000000..649020f2
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-A1_4xb64-coslr-120e_in1k.py
@@ -0,0 +1,3 @@
+_base_ = './repvgg-A0_4xb64-coslr-120e_in1k.py'
+
+model = dict(backbone=dict(arch='A1'))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-A2_4xb64-coslr-120e_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-A2_4xb64-coslr-120e_in1k.py
new file mode 100644
index 00000000..eedaf2d2
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-A2_4xb64-coslr-120e_in1k.py
@@ -0,0 +1,3 @@
+_base_ = './repvgg-A0_4xb64-coslr-120e_in1k.py'
+
+model = dict(backbone=dict(arch='A2'), head=dict(in_channels=1408))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-B0_4xb64-coslr-120e_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-B0_4xb64-coslr-120e_in1k.py
new file mode 100644
index 00000000..b3ce7ea2
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-B0_4xb64-coslr-120e_in1k.py
@@ -0,0 +1,3 @@
+_base_ = './repvgg-A0_4xb64-coslr-120e_in1k.py'
+
+model = dict(backbone=dict(arch='B0'), head=dict(in_channels=1280))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-B1_4xb64-coslr-120e_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-B1_4xb64-coslr-120e_in1k.py
new file mode 100644
index 00000000..30adea3d
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-B1_4xb64-coslr-120e_in1k.py
@@ -0,0 +1,3 @@
+_base_ = './repvgg-A0_4xb64-coslr-120e_in1k.py'
+
+model = dict(backbone=dict(arch='B1'), head=dict(in_channels=2048))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-B1g2_4xb64-coslr-120e_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-B1g2_4xb64-coslr-120e_in1k.py
new file mode 100644
index 00000000..2749db8d
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-B1g2_4xb64-coslr-120e_in1k.py
@@ -0,0 +1,3 @@
+_base_ = './repvgg-A0_4xb64-coslr-120e_in1k.py'
+
+model = dict(backbone=dict(arch='B1g2'), head=dict(in_channels=2048))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-B1g4_4xb64-coslr-120e_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-B1g4_4xb64-coslr-120e_in1k.py
new file mode 100644
index 00000000..26476909
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-B1g4_4xb64-coslr-120e_in1k.py
@@ -0,0 +1,3 @@
+_base_ = './repvgg-A0_4xb64-coslr-120e_in1k.py'
+
+model = dict(backbone=dict(arch='B1g4'), head=dict(in_channels=2048))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-B2_4xb64-coslr-120e_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-B2_4xb64-coslr-120e_in1k.py
new file mode 100644
index 00000000..4d215567
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-B2_4xb64-coslr-120e_in1k.py
@@ -0,0 +1,3 @@
+_base_ = './repvgg-A0_4xb64-coslr-120e_in1k.py'
+
+model = dict(backbone=dict(arch='B2'), head=dict(in_channels=2560))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-B2g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-B2g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
new file mode 100644
index 00000000..11331cf0
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-B2g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
@@ -0,0 +1,3 @@
+_base_ = './repvgg-B3_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py'
+
+model = dict(backbone=dict(arch='B2g4'))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-B3_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-B3_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
new file mode 100644
index 00000000..7b6dc506
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-B3_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
@@ -0,0 +1,6 @@
+_base_ = [
+ '../_base_/models/repvgg-B3_lbs-mixup_in1k.py',
+ '../_base_/datasets/imagenet_bs64_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256_200e_coslr_warmup.py',
+ '../_base_/default_runtime.py'
+]
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-B3g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-B3g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
new file mode 100644
index 00000000..67e3688c
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-B3g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
@@ -0,0 +1,3 @@
+_base_ = './repvgg-B3_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py'
+
+model = dict(backbone=dict(arch='B3g4'))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-D2se_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-D2se_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
new file mode 100644
index 00000000..d235610f
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/repvgg/repvgg-D2se_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
@@ -0,0 +1,3 @@
+_base_ = './repvgg-B3_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py'
+
+model = dict(backbone=dict(arch='D2se'))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/res2net/README.md b/openmmlab_test/mmclassification-0.24.1/configs/res2net/README.md
new file mode 100644
index 00000000..61190092
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/res2net/README.md
@@ -0,0 +1,37 @@
+# Res2Net
+
+> [Res2Net: A New Multi-scale Backbone Architecture](https://arxiv.org/pdf/1904.01169.pdf)
+
+
+
+## Abstract
+
+Representing features at multiple scales is of great importance for numerous vision tasks. Recent advances in backbone convolutional neural networks (CNNs) continually demonstrate stronger multi-scale representation ability, leading to consistent performance gains on a wide range of applications. However, most existing methods represent the multi-scale features in a layer-wise manner. In this paper, we propose a novel building block for CNNs, namely Res2Net, by constructing hierarchical residual-like connections within one single residual block. The Res2Net represents multi-scale features at a granular level and increases the range of receptive fields for each network layer. The proposed Res2Net block can be plugged into the state-of-the-art backbone CNN models, e.g., ResNet, ResNeXt, and DLA. We evaluate the Res2Net block on all these models and demonstrate consistent performance gains over baseline models on widely-used datasets, e.g., CIFAR-100 and ImageNet. Further ablation studies and experimental results on representative computer vision tasks, i.e., object detection, class activation mapping, and salient object detection, further verify the superiority of the Res2Net over the state-of-the-art baseline methods.
+
+
+
+
+
+
+## Results and models
+
+### ImageNet-1k
+
+| Model | resolution | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+| :------------------: | :--------: | :-------: | :------: | :-------: | :-------: | :----------------------------------------------------------------: | :-------------------------------------------------------------------: |
+| Res2Net-50-14w-8s\*  | 224x224 | 25.06 | 4.22 | 78.14 | 93.85 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/res2net/res2net50-w14-s8_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/res2net/res2net50-w14-s8_3rdparty_8xb32_in1k_20210927-bc967bf1.pth) |
+| Res2Net-50-26w-8s\*  | 224x224 | 48.40 | 8.39 | 79.20 | 94.36 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/res2net/res2net50-w26-s8_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/res2net/res2net50-w26-s8_3rdparty_8xb32_in1k_20210927-f547a94b.pth) |
+| Res2Net-101-26w-4s\* | 224x224 | 45.21 | 8.12 | 79.19 | 94.44 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/res2net/res2net101-w26-s4_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/res2net/res2net101-w26-s4_3rdparty_8xb32_in1k_20210927-870b6c36.pth) |
+
+*Models with * are converted from the [official repo](https://github.com/Res2Net/Res2Net-PretrainedModels). The config files of these models are only for validation; we cannot guarantee their training accuracy, and you are welcome to contribute your reproduction results.*
+
+## Citation
+
+```
+@article{gao2019res2net,
+ title={Res2Net: A New Multi-scale Backbone Architecture},
+ author={Gao, Shang-Hua and Cheng, Ming-Ming and Zhao, Kai and Zhang, Xin-Yu and Yang, Ming-Hsuan and Torr, Philip},
+ journal={IEEE TPAMI},
+ year={2021},
+ doi={10.1109/TPAMI.2019.2938758},
+}
+```
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/res2net/metafile.yml b/openmmlab_test/mmclassification-0.24.1/configs/res2net/metafile.yml
new file mode 100644
index 00000000..d76f898b
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/res2net/metafile.yml
@@ -0,0 +1,70 @@
+Collections:
+ - Name: Res2Net
+ Metadata:
+ Training Data: ImageNet-1k
+ Training Techniques:
+ - SGD with Momentum
+ - Weight Decay
+ Architecture:
+ - Batch Normalization
+ - Convolution
+ - Global Average Pooling
+ - ReLU
+ - Res2Net Block
+ Paper:
+ Title: 'Res2Net: A New Multi-scale Backbone Architecture'
+ URL: https://arxiv.org/pdf/1904.01169.pdf
+ README: configs/res2net/README.md
+ Code:
+ URL: https://github.com/open-mmlab/mmclassification/blob/v0.17.0/mmcls/models/backbones/res2net.py
+ Version: v0.17.0
+
+Models:
+ - Name: res2net50-w14-s8_3rdparty_8xb32_in1k
+ Metadata:
+ FLOPs: 4220000000
+ Parameters: 25060000
+ In Collection: Res2Net
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 78.14
+ Top 5 Accuracy: 93.85
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/res2net/res2net50-w14-s8_3rdparty_8xb32_in1k_20210927-bc967bf1.pth
+ Converted From:
+ Weights: https://1drv.ms/u/s!AkxDDnOtroRPdOTqhF8ne_aakDI?e=EVb8Ri
+ Code: https://github.com/Res2Net/Res2Net-PretrainedModels/blob/master/res2net.py#L221
+ Config: configs/res2net/res2net50-w14-s8_8xb32_in1k.py
+ - Name: res2net50-w26-s8_3rdparty_8xb32_in1k
+ Metadata:
+ FLOPs: 8390000000
+ Parameters: 48400000
+ In Collection: Res2Net
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 79.20
+ Top 5 Accuracy: 94.36
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/res2net/res2net50-w26-s8_3rdparty_8xb32_in1k_20210927-f547a94b.pth
+ Converted From:
+ Weights: https://1drv.ms/u/s!AkxDDnOtroRPdTrAd_Afzc26Z7Q?e=slYqsR
+ Code: https://github.com/Res2Net/Res2Net-PretrainedModels/blob/master/res2net.py#L201
+ Config: configs/res2net/res2net50-w26-s8_8xb32_in1k.py
+ - Name: res2net101-w26-s4_3rdparty_8xb32_in1k
+ Metadata:
+ FLOPs: 8120000000
+ Parameters: 45210000
+ In Collection: Res2Net
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 79.19
+ Top 5 Accuracy: 94.44
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/res2net/res2net101-w26-s4_3rdparty_8xb32_in1k_20210927-870b6c36.pth
+ Converted From:
+ Weights: https://1drv.ms/u/s!AkxDDnOtroRPcJRgTLkahL0cFYw?e=nwbnic
+ Code: https://github.com/Res2Net/Res2Net-PretrainedModels/blob/master/res2net.py#L181
+ Config: configs/res2net/res2net101-w26-s4_8xb32_in1k.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/res2net/res2net101-w26-s4_8xb32_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/res2net/res2net101-w26-s4_8xb32_in1k.py
new file mode 100644
index 00000000..7ebe9e94
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/res2net/res2net101-w26-s4_8xb32_in1k.py
@@ -0,0 +1,5 @@
+_base_ = [
+ '../_base_/models/res2net101-w26-s4.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/res2net/res2net50-w14-s8_8xb32_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/res2net/res2net50-w14-s8_8xb32_in1k.py
new file mode 100644
index 00000000..56cc02e3
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/res2net/res2net50-w14-s8_8xb32_in1k.py
@@ -0,0 +1,5 @@
+_base_ = [
+ '../_base_/models/res2net50-w14-s8.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/res2net/res2net50-w26-s8_8xb32_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/res2net/res2net50-w26-s8_8xb32_in1k.py
new file mode 100644
index 00000000..d7dcbeb9
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/res2net/res2net50-w26-s8_8xb32_in1k.py
@@ -0,0 +1,5 @@
+_base_ = [
+ '../_base_/models/res2net50-w26-s8.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnest/README.md b/openmmlab_test/mmclassification-0.24.1/configs/resnest/README.md
new file mode 100644
index 00000000..eb6c5fd7
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnest/README.md
@@ -0,0 +1,26 @@
+# ResNeSt
+
+> [ResNeSt: Split-Attention Networks](https://arxiv.org/abs/2004.08955)
+
+
+
+## Abstract
+
+It is well known that featuremap attention and multi-path representation are important for visual recognition. In this paper, we present a modularized architecture, which applies the channel-wise attention on different network branches to leverage their success in capturing cross-feature interactions and learning diverse representations. Our design results in a simple and unified computation block, which can be parameterized using only a few variables. Our model, named ResNeSt, outperforms EfficientNet in accuracy and latency trade-off on image classification. In addition, ResNeSt has achieved superior transfer learning results on several public benchmarks serving as the backbone, and has been adopted by the winning entries of the COCO-LVIS challenge. The source code for the complete system and the pretrained models are publicly available.
+
+
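+## Augmentation note
+
+The configs in this folder pair the ResNeSt backbones with RandAugment and AlexNet-style PCA lighting noise (the `Lighting` transform), parameterized by the ImageNet RGB eigenvalues and eigenvectors. A minimal NumPy sketch of that operation, assuming the standard formulation rather than the exact mmcls implementation:
+
+```python
+import numpy as np
+
+# ImageNet RGB covariance eigenvalues / eigenvectors, as used in the configs.
+eigval = np.array([55.4625, 4.7940, 1.1475])
+eigvec = np.array([[-0.5675, 0.7192, 0.4009],
+                   [-0.5808, -0.0045, -0.8140],
+                   [-0.5836, -0.6948, 0.4203]])
+
+def pca_lighting(img, alphastd=0.1, rng=np.random):
+    """Add PCA lighting noise to an HxWx3 float image (0-255 pixel scale)."""
+    alpha = rng.normal(0, alphastd, size=3)  # per-eigenvector strength
+    noise = eigvec @ (alpha * eigval)        # RGB offset vector
+    return img + noise                       # broadcast over all pixels
+```
+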
+## Citation
+
+```
+@misc{zhang2020resnest,
+ title={ResNeSt: Split-Attention Networks},
+ author={Hang Zhang and Chongruo Wu and Zhongyue Zhang and Yi Zhu and Haibin Lin and Zhi Zhang and Yue Sun and Tong He and Jonas Mueller and R. Manmatha and Mu Li and Alexander Smola},
+ year={2020},
+ eprint={2004.08955},
+ archivePrefix={arXiv},
+ primaryClass={cs.CV}
+}
+```
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnest/resnest101_32xb64_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/resnest/resnest101_32xb64_in1k.py
new file mode 100644
index 00000000..27b1882c
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnest/resnest101_32xb64_in1k.py
@@ -0,0 +1,181 @@
+_base_ = ['../_base_/models/resnest101.py', '../_base_/default_runtime.py']
+
+# dataset settings
+dataset_type = 'ImageNet'
+img_lighting_cfg = dict(
+ eigval=[55.4625, 4.7940, 1.1475],
+ eigvec=[[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140],
+ [-0.5836, -0.6948, 0.4203]],
+ alphastd=0.1,
+ to_rgb=True)
+policies = [
+ dict(type='AutoContrast', prob=0.5),
+ dict(type='Equalize', prob=0.5),
+ dict(type='Invert', prob=0.5),
+ dict(
+ type='Rotate',
+ magnitude_key='angle',
+ magnitude_range=(0, 30),
+ pad_val=0,
+ prob=0.5,
+ random_negative_prob=0.5),
+ dict(
+ type='Posterize',
+ magnitude_key='bits',
+ magnitude_range=(0, 4),
+ prob=0.5),
+ dict(
+ type='Solarize',
+ magnitude_key='thr',
+ magnitude_range=(0, 256),
+ prob=0.5),
+ dict(
+ type='SolarizeAdd',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 110),
+ thr=128,
+ prob=0.5),
+ dict(
+ type='ColorTransform',
+ magnitude_key='magnitude',
+ magnitude_range=(-0.9, 0.9),
+ prob=0.5,
+ random_negative_prob=0.),
+ dict(
+ type='Contrast',
+ magnitude_key='magnitude',
+ magnitude_range=(-0.9, 0.9),
+ prob=0.5,
+ random_negative_prob=0.),
+ dict(
+ type='Brightness',
+ magnitude_key='magnitude',
+ magnitude_range=(-0.9, 0.9),
+ prob=0.5,
+ random_negative_prob=0.),
+ dict(
+ type='Sharpness',
+ magnitude_key='magnitude',
+ magnitude_range=(-0.9, 0.9),
+ prob=0.5,
+ random_negative_prob=0.),
+ dict(
+ type='Shear',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 0.3),
+ pad_val=0,
+ prob=0.5,
+ direction='horizontal',
+ random_negative_prob=0.5),
+ dict(
+ type='Shear',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 0.3),
+ pad_val=0,
+ prob=0.5,
+ direction='vertical',
+ random_negative_prob=0.5),
+ dict(
+ type='Cutout',
+ magnitude_key='shape',
+ magnitude_range=(1, 41),
+ pad_val=0,
+ prob=0.5),
+ dict(
+ type='Translate',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 0.3),
+ pad_val=0,
+ prob=0.5,
+ direction='horizontal',
+ random_negative_prob=0.5,
+ interpolation='bicubic'),
+ dict(
+ type='Translate',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 0.3),
+ pad_val=0,
+ prob=0.5,
+ direction='vertical',
+ random_negative_prob=0.5,
+ interpolation='bicubic')
+]
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
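+    # RandAugment: apply 2 randomly chosen policies from the list above,
+    # each at magnitude level 12.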
+ dict(
+ type='RandAugment',
+ policies=policies,
+ num_policies=2,
+ magnitude_level=12),
+ dict(
+ type='RandomResizedCrop',
+ size=256,
+ efficientnet_style=True,
+ interpolation='bicubic',
+ backend='pillow'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4),
+ dict(type='Lighting', **img_lighting_cfg),
+ dict(
+ type='Normalize',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ to_rgb=False),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='CenterCrop',
+ crop_size=256,
+ efficientnet_style=True,
+ interpolation='bicubic',
+ backend='pillow'),
+ dict(
+ type='Normalize',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ to_rgb=True),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=64,
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/train',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ # replace `data/val` with `data/test` for standard test
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline))
+evaluation = dict(interval=1, metric='accuracy')
+
+# optimizer
+optimizer = dict(
+ type='SGD',
+ lr=0.8,
+ momentum=0.9,
+ weight_decay=1e-4,
+ paramwise_cfg=dict(bias_decay_mult=0., norm_decay_mult=0.))
+optimizer_config = dict(grad_clip=None)
+
+# learning policy
+lr_config = dict(
+ policy='CosineAnnealing',
+ min_lr=0,
+ warmup='linear',
+ warmup_iters=5,
+ warmup_ratio=1e-6,
+ warmup_by_epoch=True)
+runner = dict(type='EpochBasedRunner', max_epochs=270)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnest/resnest101_b64x32_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/resnest/resnest101_b64x32_imagenet.py
new file mode 100644
index 00000000..31c36477
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnest/resnest101_b64x32_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'resnest101_32xb64_in1k.py'
+
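+# Legacy config name kept for backwards compatibility; loading it is expected
+# to emit a deprecation warning pointing to the new file named below.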
+_deprecation_ = dict(
+ expected='resnest101_32xb64_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnest/resnest200_64xb32_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/resnest/resnest200_64xb32_in1k.py
new file mode 100644
index 00000000..3b166a2d
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnest/resnest200_64xb32_in1k.py
@@ -0,0 +1,181 @@
+_base_ = ['../_base_/models/resnest200.py', '../_base_/default_runtime.py']
+
+# dataset settings
+dataset_type = 'ImageNet'
+img_lighting_cfg = dict(
+ eigval=[55.4625, 4.7940, 1.1475],
+ eigvec=[[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140],
+ [-0.5836, -0.6948, 0.4203]],
+ alphastd=0.1,
+ to_rgb=True)
+policies = [
+ dict(type='AutoContrast', prob=0.5),
+ dict(type='Equalize', prob=0.5),
+ dict(type='Invert', prob=0.5),
+ dict(
+ type='Rotate',
+ magnitude_key='angle',
+ magnitude_range=(0, 30),
+ pad_val=0,
+ prob=0.5,
+ random_negative_prob=0.5),
+ dict(
+ type='Posterize',
+ magnitude_key='bits',
+ magnitude_range=(0, 4),
+ prob=0.5),
+ dict(
+ type='Solarize',
+ magnitude_key='thr',
+ magnitude_range=(0, 256),
+ prob=0.5),
+ dict(
+ type='SolarizeAdd',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 110),
+ thr=128,
+ prob=0.5),
+ dict(
+ type='ColorTransform',
+ magnitude_key='magnitude',
+ magnitude_range=(-0.9, 0.9),
+ prob=0.5,
+ random_negative_prob=0.),
+ dict(
+ type='Contrast',
+ magnitude_key='magnitude',
+ magnitude_range=(-0.9, 0.9),
+ prob=0.5,
+ random_negative_prob=0.),
+ dict(
+ type='Brightness',
+ magnitude_key='magnitude',
+ magnitude_range=(-0.9, 0.9),
+ prob=0.5,
+ random_negative_prob=0.),
+ dict(
+ type='Sharpness',
+ magnitude_key='magnitude',
+ magnitude_range=(-0.9, 0.9),
+ prob=0.5,
+ random_negative_prob=0.),
+ dict(
+ type='Shear',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 0.3),
+ pad_val=0,
+ prob=0.5,
+ direction='horizontal',
+ random_negative_prob=0.5),
+ dict(
+ type='Shear',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 0.3),
+ pad_val=0,
+ prob=0.5,
+ direction='vertical',
+ random_negative_prob=0.5),
+ dict(
+ type='Cutout',
+ magnitude_key='shape',
+ magnitude_range=(1, 41),
+ pad_val=0,
+ prob=0.5),
+ dict(
+ type='Translate',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 0.3),
+ pad_val=0,
+ prob=0.5,
+ direction='horizontal',
+ random_negative_prob=0.5,
+ interpolation='bicubic'),
+ dict(
+ type='Translate',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 0.3),
+ pad_val=0,
+ prob=0.5,
+ direction='vertical',
+ random_negative_prob=0.5,
+ interpolation='bicubic')
+]
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandAugment',
+ policies=policies,
+ num_policies=2,
+ magnitude_level=12),
+ dict(
+ type='RandomResizedCrop',
+ size=320,
+ efficientnet_style=True,
+ interpolation='bicubic',
+ backend='pillow'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4),
+ dict(type='Lighting', **img_lighting_cfg),
+ dict(
+ type='Normalize',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ to_rgb=False),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='CenterCrop',
+ crop_size=320,
+ efficientnet_style=True,
+ interpolation='bicubic',
+ backend='pillow'),
+ dict(
+ type='Normalize',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ to_rgb=True),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=32,
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/train',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ # replace `data/val` with `data/test` for standard test
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline))
+evaluation = dict(interval=1, metric='accuracy')
+
+# optimizer
+optimizer = dict(
+ type='SGD',
+ lr=0.8,
+ momentum=0.9,
+ weight_decay=1e-4,
+ paramwise_cfg=dict(bias_decay_mult=0., norm_decay_mult=0.))
+optimizer_config = dict(grad_clip=None)
+
+# learning policy
+lr_config = dict(
+ policy='CosineAnnealing',
+ min_lr=0,
+ warmup='linear',
+ warmup_iters=5,
+ warmup_ratio=1e-6,
+ warmup_by_epoch=True)
+runner = dict(type='EpochBasedRunner', max_epochs=270)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnest/resnest200_b32x64_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/resnest/resnest200_b32x64_imagenet.py
new file mode 100644
index 00000000..8e62865f
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnest/resnest200_b32x64_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'resnest200_64xb32_in1k.py'
+
+_deprecation_ = dict(
+ expected='resnest200_64xb32_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnest/resnest269_64xb32_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/resnest/resnest269_64xb32_in1k.py
new file mode 100644
index 00000000..7a4db092
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnest/resnest269_64xb32_in1k.py
@@ -0,0 +1,181 @@
+_base_ = ['../_base_/models/resnest269.py', '../_base_/default_runtime.py']
+
+# dataset settings
+dataset_type = 'ImageNet'
+img_lighting_cfg = dict(
+ eigval=[55.4625, 4.7940, 1.1475],
+ eigvec=[[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140],
+ [-0.5836, -0.6948, 0.4203]],
+ alphastd=0.1,
+ to_rgb=True)
+policies = [
+ dict(type='AutoContrast', prob=0.5),
+ dict(type='Equalize', prob=0.5),
+ dict(type='Invert', prob=0.5),
+ dict(
+ type='Rotate',
+ magnitude_key='angle',
+ magnitude_range=(0, 30),
+ pad_val=0,
+ prob=0.5,
+ random_negative_prob=0.5),
+ dict(
+ type='Posterize',
+ magnitude_key='bits',
+ magnitude_range=(0, 4),
+ prob=0.5),
+ dict(
+ type='Solarize',
+ magnitude_key='thr',
+ magnitude_range=(0, 256),
+ prob=0.5),
+ dict(
+ type='SolarizeAdd',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 110),
+ thr=128,
+ prob=0.5),
+ dict(
+ type='ColorTransform',
+ magnitude_key='magnitude',
+ magnitude_range=(-0.9, 0.9),
+ prob=0.5,
+ random_negative_prob=0.),
+ dict(
+ type='Contrast',
+ magnitude_key='magnitude',
+ magnitude_range=(-0.9, 0.9),
+ prob=0.5,
+ random_negative_prob=0.),
+ dict(
+ type='Brightness',
+ magnitude_key='magnitude',
+ magnitude_range=(-0.9, 0.9),
+ prob=0.5,
+ random_negative_prob=0.),
+ dict(
+ type='Sharpness',
+ magnitude_key='magnitude',
+ magnitude_range=(-0.9, 0.9),
+ prob=0.5,
+ random_negative_prob=0.),
+ dict(
+ type='Shear',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 0.3),
+ pad_val=0,
+ prob=0.5,
+ direction='horizontal',
+ random_negative_prob=0.5),
+ dict(
+ type='Shear',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 0.3),
+ pad_val=0,
+ prob=0.5,
+ direction='vertical',
+ random_negative_prob=0.5),
+ dict(
+ type='Cutout',
+ magnitude_key='shape',
+ magnitude_range=(1, 41),
+ pad_val=0,
+ prob=0.5),
+ dict(
+ type='Translate',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 0.3),
+ pad_val=0,
+ prob=0.5,
+ direction='horizontal',
+ random_negative_prob=0.5,
+ interpolation='bicubic'),
+ dict(
+ type='Translate',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 0.3),
+ pad_val=0,
+ prob=0.5,
+ direction='vertical',
+ random_negative_prob=0.5,
+ interpolation='bicubic')
+]
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandAugment',
+ policies=policies,
+ num_policies=2,
+ magnitude_level=12),
+ dict(
+ type='RandomResizedCrop',
+ size=416,
+ efficientnet_style=True,
+ interpolation='bicubic',
+ backend='pillow'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4),
+ dict(type='Lighting', **img_lighting_cfg),
+ dict(
+ type='Normalize',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ to_rgb=False),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='CenterCrop',
+ crop_size=416,
+ efficientnet_style=True,
+ interpolation='bicubic',
+ backend='pillow'),
+ dict(
+ type='Normalize',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ to_rgb=True),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=32,
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/train',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ # replace `data/val` with `data/test` for standard test
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline))
+evaluation = dict(interval=1, metric='accuracy')
+
+# optimizer
+optimizer = dict(
+ type='SGD',
+ lr=0.8,
+ momentum=0.9,
+ weight_decay=1e-4,
+ paramwise_cfg=dict(bias_decay_mult=0., norm_decay_mult=0.))
+optimizer_config = dict(grad_clip=None)
+
+# learning policy
+lr_config = dict(
+ policy='CosineAnnealing',
+ min_lr=0,
+ warmup='linear',
+ warmup_iters=5,
+ warmup_ratio=1e-6,
+ warmup_by_epoch=True)
+runner = dict(type='EpochBasedRunner', max_epochs=270)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnest/resnest269_b32x64_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/resnest/resnest269_b32x64_imagenet.py
new file mode 100644
index 00000000..0f8b76c5
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnest/resnest269_b32x64_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'resnest269_64xb32_in1k.py'
+
+_deprecation_ = dict(
+ expected='resnest269_64xb32_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnest/resnest50_32xb64_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/resnest/resnest50_32xb64_in1k.py
new file mode 100644
index 00000000..812a3bee
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnest/resnest50_32xb64_in1k.py
@@ -0,0 +1,181 @@
+_base_ = ['../_base_/models/resnest50.py', '../_base_/default_runtime.py']
+
+# dataset settings
+dataset_type = 'ImageNet'
+img_lighting_cfg = dict(
+ eigval=[55.4625, 4.7940, 1.1475],
+ eigvec=[[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140],
+ [-0.5836, -0.6948, 0.4203]],
+ alphastd=0.1,
+ to_rgb=True)
+policies = [
+ dict(type='AutoContrast', prob=0.5),
+ dict(type='Equalize', prob=0.5),
+ dict(type='Invert', prob=0.5),
+ dict(
+ type='Rotate',
+ magnitude_key='angle',
+ magnitude_range=(0, 30),
+ pad_val=0,
+ prob=0.5,
+ random_negative_prob=0.5),
+ dict(
+ type='Posterize',
+ magnitude_key='bits',
+ magnitude_range=(0, 4),
+ prob=0.5),
+ dict(
+ type='Solarize',
+ magnitude_key='thr',
+ magnitude_range=(0, 256),
+ prob=0.5),
+ dict(
+ type='SolarizeAdd',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 110),
+ thr=128,
+ prob=0.5),
+ dict(
+ type='ColorTransform',
+ magnitude_key='magnitude',
+ magnitude_range=(-0.9, 0.9),
+ prob=0.5,
+ random_negative_prob=0.),
+ dict(
+ type='Contrast',
+ magnitude_key='magnitude',
+ magnitude_range=(-0.9, 0.9),
+ prob=0.5,
+ random_negative_prob=0.),
+ dict(
+ type='Brightness',
+ magnitude_key='magnitude',
+ magnitude_range=(-0.9, 0.9),
+ prob=0.5,
+ random_negative_prob=0.),
+ dict(
+ type='Sharpness',
+ magnitude_key='magnitude',
+ magnitude_range=(-0.9, 0.9),
+ prob=0.5,
+ random_negative_prob=0.),
+ dict(
+ type='Shear',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 0.3),
+ pad_val=0,
+ prob=0.5,
+ direction='horizontal',
+ random_negative_prob=0.5),
+ dict(
+ type='Shear',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 0.3),
+ pad_val=0,
+ prob=0.5,
+ direction='vertical',
+ random_negative_prob=0.5),
+ dict(
+ type='Cutout',
+ magnitude_key='shape',
+ magnitude_range=(1, 41),
+ pad_val=0,
+ prob=0.5),
+ dict(
+ type='Translate',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 0.3),
+ pad_val=0,
+ prob=0.5,
+ direction='horizontal',
+ random_negative_prob=0.5,
+ interpolation='bicubic'),
+ dict(
+ type='Translate',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 0.3),
+ pad_val=0,
+ prob=0.5,
+ direction='vertical',
+ random_negative_prob=0.5,
+ interpolation='bicubic')
+]
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandAugment',
+ policies=policies,
+ num_policies=2,
+ magnitude_level=12),
+ dict(
+ type='RandomResizedCrop',
+ size=224,
+ efficientnet_style=True,
+ interpolation='bicubic',
+ backend='pillow'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4),
+ dict(type='Lighting', **img_lighting_cfg),
+ dict(
+ type='Normalize',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ to_rgb=False),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='CenterCrop',
+ crop_size=224,
+ efficientnet_style=True,
+ interpolation='bicubic',
+ backend='pillow'),
+ dict(
+ type='Normalize',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ to_rgb=True),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=64,
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/train',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ # replace `data/val` with `data/test` for standard test
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline))
+evaluation = dict(interval=1, metric='accuracy')
+
+# optimizer
+optimizer = dict(
+ type='SGD',
+ lr=0.8,
+ momentum=0.9,
+ weight_decay=1e-4,
+ paramwise_cfg=dict(bias_decay_mult=0., norm_decay_mult=0.))
+optimizer_config = dict(grad_clip=None)
+
+# learning policy
+lr_config = dict(
+ policy='CosineAnnealing',
+ min_lr=0,
+ warmup='linear',
+ warmup_iters=5,
+ warmup_ratio=1e-6,
+ warmup_by_epoch=True)
+runner = dict(type='EpochBasedRunner', max_epochs=270)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnest/resnest50_b64x32_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/resnest/resnest50_b64x32_imagenet.py
new file mode 100644
index 00000000..c0da422a
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnest/resnest50_b64x32_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'resnest50_32xb64_in1k.py'
+
+_deprecation_ = dict(
+ expected='resnest50_32xb64_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnet/README.md b/openmmlab_test/mmclassification-0.24.1/configs/resnet/README.md
new file mode 100644
index 00000000..d32fcd64
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnet/README.md
@@ -0,0 +1,91 @@
+# ResNet
+
+> [Deep Residual Learning for Image Recognition](https://openaccess.thecvf.com/content_cvpr_2016/html/He_Deep_Residual_Learning_CVPR_2016_paper.html)
+
+
+
+## Abstract
+
+Deeper neural networks are more difficult to train. We present a residual learning framework to ease the training of networks that are substantially deeper than those used previously. We explicitly reformulate the layers as learning residual functions with reference to the layer inputs, instead of learning unreferenced functions. We provide comprehensive empirical evidence showing that these residual networks are easier to optimize, and can gain accuracy from considerably increased depth. On the ImageNet dataset we evaluate residual nets with a depth of up to 152 layers---8x deeper than VGG nets but still having lower complexity. An ensemble of these residual nets achieves 3.57% error on the ImageNet test set. This result won the 1st place on the ILSVRC 2015 classification task. We also present analysis on CIFAR-10 with 100 and 1000 layers.
+
+The depth of representations is of central importance for many visual recognition tasks. Solely due to our extremely deep representations, we obtain a 28% relative improvement on the COCO object detection dataset. Deep residual nets are foundations of our submissions to ILSVRC & COCO 2015 competitions, where we also won the 1st places on the tasks of ImageNet detection, ImageNet localization, COCO detection, and COCO segmentation.
+
+
+## Results and models
+
+### ImageNet-21k
+
+The models pre-trained on ImageNet-21k are only used for fine-tuning and therefore have no evaluation results.
+
+| Model | resolution | Params(M) | Flops(G) | Download |
+| :------------: | :--------: | :-------: | :------: | :-------------------------------------------------------------------------------------------------------------------: |
+| ResNet-50-mill | 224x224 | 86.74 | 15.14 | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_3rdparty-mill_in21k_20220331-faac000b.pth) |
+
+*The "mill" means using the mutil-label pretrain weight from [ImageNet-21K Pretraining for the Masses](https://github.com/Alibaba-MIIL/ImageNet21K).*
+
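+To fine-tune from this checkpoint, the backbone weights can be loaded through `init_cfg`. A minimal config sketch (the head size is an example; dataset and schedule settings are omitted):
+
+```python
+# Load the ImageNet-21k "mill" weights into the backbone before fine-tuning.
+checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_3rdparty-mill_in21k_20220331-faac000b.pth'  # noqa
+model = dict(
+    backbone=dict(
+        init_cfg=dict(
+            type='Pretrained', checkpoint=checkpoint, prefix='backbone')),
+    head=dict(num_classes=200),  # e.g. CUB-200-2011; set to your dataset
+)
+```
+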
+### CIFAR-10
+
+| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+| :--------: | :-------: | :------: | :-------: | :-------: | :--------------------------------------------------------------------------: | :-----------------------------------------------------------------------------: |
+| ResNet-18 | 11.17 | 0.56 | 94.82 | 99.87 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet18_8xb16_cifar10.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_b16x8_cifar10_20210528-bd6371c8.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_b16x8_cifar10_20210528-bd6371c8.log.json) |
+| ResNet-34 | 21.28 | 1.16 | 95.34 | 99.87 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet34_8xb16_cifar10.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_b16x8_cifar10_20210528-a8aa36a6.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_b16x8_cifar10_20210528-a8aa36a6.log.json) |
+| ResNet-50 | 23.52 | 1.31 | 95.55 | 99.91 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet50_8xb16_cifar10.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar10_20210528-f54bfad9.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar10_20210528-f54bfad9.log.json) |
+| ResNet-101 | 42.51 | 2.52 | 95.58 | 99.87 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet101_8xb16_cifar10.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_b16x8_cifar10_20210528-2d29e936.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_b16x8_cifar10_20210528-2d29e936.log.json) |
+| ResNet-152 | 58.16 | 3.74 | 95.76 | 99.89 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet152_8xb16_cifar10.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_b16x8_cifar10_20210528-3e8e9178.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_b16x8_cifar10_20210528-3e8e9178.log.json) |
+
+### CIFAR-100
+
+| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+| :-------: | :-------: | :------: | :-------: | :-------: | :---------------------------------------------------------------------------: | :-----------------------------------------------------------------------------: |
+| ResNet-50 | 23.71 | 1.31 | 79.90 | 95.19 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet50_8xb16_cifar100.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar100_20210528-67b58a1b.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar100_20210528-67b58a1b.log.json) |
+
+### ImageNet-1k
+
+| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+| :----------------: | :-------: | :------: | :-------: | :-------: | :----------------------------------------------------------------------: | :-------------------------------------------------------------------------: |
+| ResNet-18 | 11.69 | 1.82 | 69.90 | 89.43 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet18_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_8xb32_in1k_20210831-fbbb1da6.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_8xb32_in1k_20210831-fbbb1da6.log.json) |
+| ResNet-34 | 21.8 | 3.68 | 73.62 | 91.59 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet34_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_8xb32_in1k_20210831-f257d4e6.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_8xb32_in1k_20210831-f257d4e6.log.json) |
+| ResNet-50 | 25.56 | 4.12 | 76.55 | 93.06 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet50_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.log.json) |
+| ResNet-101 | 44.55 | 7.85 | 77.97 | 94.06 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet101_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_8xb32_in1k_20210831-539c63f8.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_8xb32_in1k_20210831-539c63f8.log.json) |
+| ResNet-152 | 60.19 | 11.58 | 78.48 | 94.13 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet152_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_8xb32_in1k_20210901-4d7582fa.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_8xb32_in1k_20210901-4d7582fa.log.json) |
+| ResNetV1C-50 | 25.58 | 4.36 | 77.01 | 93.58 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnetv1c50_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1c50_8xb32_in1k_20220214-3343eccd.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1c50_8xb32_in1k_20220214-3343eccd.log.json) |
+| ResNetV1C-101 | 44.57 | 8.09 | 78.30 | 94.27 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnetv1c101_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1c101_8xb32_in1k_20220214-434fe45f.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1c101_8xb32_in1k_20220214-434fe45f.log.json) |
+| ResNetV1C-152 | 60.21 | 11.82 | 78.76 | 94.41 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnetv1c152_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1c152_8xb32_in1k_20220214-c013291f.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1c152_8xb32_in1k_20220214-c013291f.log.json) |
+| ResNetV1D-50 | 25.58 | 4.36 | 77.54 | 93.57 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnetv1d50_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d50_b32x8_imagenet_20210531-db14775a.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d50_b32x8_imagenet_20210531-db14775a.log.json) |
+| ResNetV1D-101 | 44.57 | 8.09 | 78.93 | 94.48 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnetv1d101_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d101_b32x8_imagenet_20210531-6e13bcd3.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d101_b32x8_imagenet_20210531-6e13bcd3.log.json) |
+| ResNetV1D-152 | 60.21 | 11.82 | 79.41 | 94.70 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnetv1d152_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d152_b32x8_imagenet_20210531-278cf22a.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d152_b32x8_imagenet_20210531-278cf22a.log.json) |
+| ResNet-50 (fp16) | 25.56 | 4.12 | 76.30 | 93.07 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet50_8xb32-fp16_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/fp16/resnet50_batch256_fp16_imagenet_20210320-b3964210.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/fp16/resnet50_batch256_fp16_imagenet_20210320-b3964210.log.json) |
+| Wide-ResNet-50\* | 68.88 | 11.44 | 78.48 | 94.08 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/wide-resnet50_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/wide-resnet50_3rdparty_8xb32_in1k_20220304-66678344.pth) |
+| Wide-ResNet-101\* | 126.89 | 22.81 | 78.84 | 94.28 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/wide-resnet101_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/wide-resnet101_3rdparty_8xb32_in1k_20220304-8d5f9d61.pth) |
+| ResNet-50 (rsb-a1) | 25.56 | 4.12 | 80.12 | 94.78 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet50_8xb256-rsb-a1-600e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.log.json) |
+| ResNet-50 (rsb-a2) | 25.56 | 4.12 | 79.55 | 94.37 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet50_8xb256-rsb-a2-300e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a2-300e_in1k_20211228-0fd8be6e.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a2-300e_in1k_20211228-0fd8be6e.log.json) |
+| ResNet-50 (rsb-a3) | 25.56 | 4.12 | 78.30 | 93.80 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet50_8xb256-rsb-a3-100e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a3-100e_in1k_20211228-3493673c.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a3-100e_in1k_20211228-3493673c.log.json) |
+
+*The "rsb" means using the training settings from [ResNet strikes back: An improved training procedure in timm](https://arxiv.org/abs/2110.00476).*
+
+*Models with * are converted from the [official repo](https://github.com/pytorch/vision). The config files of these models are only for validation. We don't ensure these config files' training accuracy and welcome you to contribute your reproduction results.*
+
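+For a quick sanity check of a converted checkpoint, single-image inference is usually enough. A sketch using the mmcls Python API (the demo image ships with the repository; device and paths are placeholders):
+
+```python
+from mmcls.apis import inference_model, init_model
+
+config = 'configs/resnet/wide-resnet50_8xb32_in1k.py'
+checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/wide-resnet50_3rdparty_8xb32_in1k_20220304-66678344.pth'  # noqa
+model = init_model(config, checkpoint, device='cpu')
+result = inference_model(model, 'demo/demo.JPEG')
+print(result['pred_class'], result['pred_score'])
+```
+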
+### CUB-200-2011
+
+| Model | Pretrain | resolution | Params(M) | Flops(G) | Top-1 (%) | Config | Download |
+| :-------: | :--------------------------------------------------: | :--------: | :-------: | :------: | :-------: | :------------------------------------------------: | :---------------------------------------------------: |
+| ResNet-50 | [ImageNet-21k-mill](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_3rdparty-mill_in21k_20220331-faac000b.pth) | 448x448 | 23.92 | 16.48 | 88.45 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet50_8xb8_cub.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb8_cub_20220307-57840e60.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb8_cub_20220307-57840e60.log.json) |
+
+### Stanford-Cars
+
+| Model | Pretrain | resolution | Params(M) | Flops(G) | Top-1 (%) | Config | Download |
+| :-------: | :--------------------------------------------------: | :--------: | :-------: | :------: | :-------: | :------------------------------------------------: | :---------------------------------------------------: |
+| ResNet-50 | [ImageNet-21k-mill](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_3rdparty-mill_in21k_20220331-faac000b.pth) | 448x448 | 23.92 | 16.48 | 92.82 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet50_8xb8_cars.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb8_cars_20220812-9d85901a.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb8_cars_20220812-9d85901a.log.json) |
+
+## Citation
+
+```
+@inproceedings{he2016deep,
+ title={Deep residual learning for image recognition},
+ author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian},
+ booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition},
+ pages={770--778},
+ year={2016}
+}
+```
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnet/metafile.yml b/openmmlab_test/mmclassification-0.24.1/configs/resnet/metafile.yml
new file mode 100644
index 00000000..4be4bf9b
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnet/metafile.yml
@@ -0,0 +1,365 @@
+Collections:
+ - Name: ResNet
+ Metadata:
+ Training Data: ImageNet-1k
+ Training Techniques:
+ - SGD with Momentum
+ - Weight Decay
+ Training Resources: 8x V100 GPUs
+ Epochs: 100
+ Batch Size: 256
+ Architecture:
+ - ResNet
+ Paper:
+ URL: https://openaccess.thecvf.com/content_cvpr_2016/html/He_Deep_Residual_Learning_CVPR_2016_paper.html
+ Title: "Deep Residual Learning for Image Recognition"
+ README: configs/resnet/README.md
+ Code:
+ URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/resnet.py#L383
+ Version: v0.15.0
+
+Models:
+ - Name: resnet18_8xb16_cifar10
+ Metadata:
+ Training Data: CIFAR-10
+ Epochs: 200
+ Batch Size: 128
+ FLOPs: 560000000
+ Parameters: 11170000
+ In Collection: ResNet
+ Results:
+ - Dataset: CIFAR-10
+ Metrics:
+ Top 1 Accuracy: 94.82
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_b16x8_cifar10_20210528-bd6371c8.pth
+ Config: configs/resnet/resnet18_8xb16_cifar10.py
+ - Name: resnet34_8xb16_cifar10
+ Metadata:
+ Training Data: CIFAR-10
+ Epochs: 200
+ Batch Size: 128
+ FLOPs: 1160000000
+ Parameters: 21280000
+ In Collection: ResNet
+ Results:
+ - Dataset: CIFAR-10
+ Metrics:
+ Top 1 Accuracy: 95.34
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_b16x8_cifar10_20210528-a8aa36a6.pth
+ Config: configs/resnet/resnet34_8xb16_cifar10.py
+ - Name: resnet50_8xb16_cifar10
+ Metadata:
+ Training Data: CIFAR-10
+ Epochs: 200
+ Batch Size: 128
+ FLOPs: 1310000000
+ Parameters: 23520000
+ In Collection: ResNet
+ Results:
+ - Dataset: CIFAR-10
+ Metrics:
+ Top 1 Accuracy: 95.55
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar10_20210528-f54bfad9.pth
+ Config: configs/resnet/resnet50_8xb16_cifar10.py
+ - Name: resnet101_8xb16_cifar10
+ Metadata:
+ Training Data: CIFAR-10
+ Epochs: 200
+ Batch Size: 128
+ FLOPs: 2520000000
+ Parameters: 42510000
+ In Collection: ResNet
+ Results:
+ - Dataset: CIFAR-10
+ Metrics:
+ Top 1 Accuracy: 95.58
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_b16x8_cifar10_20210528-2d29e936.pth
+ Config: configs/resnet/resnet101_8xb16_cifar10.py
+ - Name: resnet152_8xb16_cifar10
+ Metadata:
+ Training Data: CIFAR-10
+ Epochs: 200
+ Batch Size: 128
+ FLOPs: 3740000000
+ Parameters: 58160000
+ In Collection: ResNet
+ Results:
+ - Dataset: CIFAR-10
+ Metrics:
+ Top 1 Accuracy: 95.76
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_b16x8_cifar10_20210528-3e8e9178.pth
+ Config: configs/resnet/resnet152_8xb16_cifar10.py
+ - Name: resnet50_8xb16_cifar100
+ Metadata:
+ Training Data: CIFAR-100
+ Epochs: 200
+ Batch Size: 128
+ FLOPs: 1310000000
+ Parameters: 23710000
+ In Collection: ResNet
+ Results:
+ - Dataset: CIFAR-100
+ Metrics:
+ Top 1 Accuracy: 79.90
+ Top 5 Accuracy: 95.19
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar100_20210528-67b58a1b.pth
+ Config: configs/resnet/resnet50_8xb16_cifar100.py
+ - Name: resnet18_8xb32_in1k
+ Metadata:
+ FLOPs: 1820000000
+ Parameters: 11690000
+ In Collection: ResNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 69.90
+ Top 5 Accuracy: 89.43
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_8xb32_in1k_20210831-fbbb1da6.pth
+ Config: configs/resnet/resnet18_8xb32_in1k.py
+ - Name: resnet34_8xb32_in1k
+ Metadata:
+ FLOPs: 3680000000
+      Parameters: 21800000
+ In Collection: ResNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 73.62
+ Top 5 Accuracy: 91.59
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_8xb32_in1k_20210831-f257d4e6.pth
+ Config: configs/resnet/resnet34_8xb32_in1k.py
+ - Name: resnet50_8xb32_in1k
+ Metadata:
+ FLOPs: 4120000000
+ Parameters: 25560000
+ In Collection: ResNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 76.55
+ Top 5 Accuracy: 93.06
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.pth
+ Config: configs/resnet/resnet50_8xb32_in1k.py
+ - Name: resnet101_8xb32_in1k
+ Metadata:
+ FLOPs: 7850000000
+ Parameters: 44550000
+ In Collection: ResNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 77.97
+ Top 5 Accuracy: 94.06
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_8xb32_in1k_20210831-539c63f8.pth
+ Config: configs/resnet/resnet101_8xb32_in1k.py
+ - Name: resnet152_8xb32_in1k
+ Metadata:
+ FLOPs: 11580000000
+ Parameters: 60190000
+ In Collection: ResNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 78.48
+ Top 5 Accuracy: 94.13
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_8xb32_in1k_20210901-4d7582fa.pth
+ Config: configs/resnet/resnet152_8xb32_in1k.py
+ - Name: resnetv1d50_8xb32_in1k
+ Metadata:
+ FLOPs: 4360000000
+ Parameters: 25580000
+ In Collection: ResNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 77.54
+ Top 5 Accuracy: 93.57
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d50_b32x8_imagenet_20210531-db14775a.pth
+ Config: configs/resnet/resnetv1d50_8xb32_in1k.py
+ - Name: resnetv1d101_8xb32_in1k
+ Metadata:
+ FLOPs: 8090000000
+ Parameters: 44570000
+ In Collection: ResNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 78.93
+ Top 5 Accuracy: 94.48
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d101_b32x8_imagenet_20210531-6e13bcd3.pth
+ Config: configs/resnet/resnetv1d101_8xb32_in1k.py
+ - Name: resnetv1d152_8xb32_in1k
+ Metadata:
+ FLOPs: 11820000000
+ Parameters: 60210000
+ In Collection: ResNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 79.41
+ Top 5 Accuracy: 94.70
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d152_b32x8_imagenet_20210531-278cf22a.pth
+ Config: configs/resnet/resnetv1d152_8xb32_in1k.py
+ - Name: resnet50_8xb32-fp16_in1k
+ Metadata:
+ FLOPs: 4120000000
+ Parameters: 25560000
+ Training Techniques:
+ - SGD with Momentum
+ - Weight Decay
+ - Mixed Precision Training
+ In Collection: ResNet
+ Results:
+ - Task: Image Classification
+ Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 76.30
+ Top 5 Accuracy: 93.07
+ Weights: https://download.openmmlab.com/mmclassification/v0/fp16/resnet50_batch256_fp16_imagenet_20210320-b3964210.pth
+ Config: configs/resnet/resnet50_8xb32-fp16_in1k.py
+ - Name: resnet50_8xb256-rsb-a1-600e_in1k
+ Metadata:
+ FLOPs: 4120000000
+ Parameters: 25560000
+ Training Techniques:
+ - LAMB
+ - Weight Decay
+ - Cosine Annealing
+ - Mixup
+ - CutMix
+ - RepeatAugSampler
+ - RandAugment
+ Epochs: 600
+ Batch Size: 2048
+ In Collection: ResNet
+ Results:
+ - Task: Image Classification
+ Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 80.12
+ Top 5 Accuracy: 94.78
+ Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth
+ Config: configs/resnet/resnet50_8xb256-rsb-a1-600e_in1k.py
+ - Name: resnet50_8xb256-rsb-a2-300e_in1k
+ Metadata:
+ FLOPs: 4120000000
+ Parameters: 25560000
+ Training Techniques:
+ - LAMB
+ - Weight Decay
+ - Cosine Annealing
+ - Mixup
+ - CutMix
+ - RepeatAugSampler
+ - RandAugment
+ Epochs: 300
+ Batch Size: 2048
+ In Collection: ResNet
+ Results:
+ - Task: Image Classification
+ Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 79.55
+ Top 5 Accuracy: 94.37
+ Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a2-300e_in1k_20211228-0fd8be6e.pth
+ Config: configs/resnet/resnet50_8xb256-rsb-a2-300e_in1k.py
+ - Name: resnet50_8xb256-rsb-a3-100e_in1k
+ Metadata:
+ FLOPs: 4120000000
+ Parameters: 25560000
+ Training Techniques:
+ - LAMB
+ - Weight Decay
+ - Cosine Annealing
+ - Mixup
+ - CutMix
+ - RandAugment
+ Batch Size: 2048
+ In Collection: ResNet
+ Results:
+ - Task: Image Classification
+ Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 78.30
+ Top 5 Accuracy: 93.80
+ Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a3-100e_in1k_20211228-3493673c.pth
+ Config: configs/resnet/resnet50_8xb256-rsb-a3-100e_in1k.py
+ - Name: resnetv1c50_8xb32_in1k
+ Metadata:
+ FLOPs: 4360000000
+ Parameters: 25580000
+ In Collection: ResNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 77.01
+ Top 5 Accuracy: 93.58
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1c50_8xb32_in1k_20220214-3343eccd.pth
+ Config: configs/resnet/resnetv1c50_8xb32_in1k.py
+ - Name: resnetv1c101_8xb32_in1k
+ Metadata:
+ FLOPs: 8090000000
+ Parameters: 44570000
+ In Collection: ResNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 78.30
+ Top 5 Accuracy: 94.27
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1c101_8xb32_in1k_20220214-434fe45f.pth
+ Config: configs/resnet/resnetv1c101_8xb32_in1k.py
+ - Name: resnetv1c152_8xb32_in1k
+ Metadata:
+ FLOPs: 11820000000
+ Parameters: 60210000
+ In Collection: ResNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 78.76
+ Top 5 Accuracy: 94.41
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1c152_8xb32_in1k_20220214-c013291f.pth
+ Config: configs/resnet/resnetv1c152_8xb32_in1k.py
+ - Name: resnet50_8xb8_cub
+ Metadata:
+ FLOPs: 16480000000
+ Parameters: 23920000
+ In Collection: ResNet
+ Results:
+ - Dataset: CUB-200-2011
+ Metrics:
+ Top 1 Accuracy: 88.45
+ Task: Image Classification
+ Pretrain: https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_3rdparty-mill_in21k_20220331-faac000b.pth
+ Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb8_cub_20220307-57840e60.pth
+ Config: configs/resnet/resnet50_8xb8_cub.py
+ - Name: resnet50_8xb8_cars
+ Metadata:
+ FLOPs: 16480000000
+ Parameters: 23920000
+ In Collection: ResNet
+ Results:
+ - Dataset: StanfordCars
+ Metrics:
+ Top 1 Accuracy: 92.82
+ Task: Image Classification
+ Pretrain: https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_3rdparty-mill_in21k_20220331-faac000b.pth
+ Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb8_cars_20220812-9d85901a.pth
+ Config: configs/resnet/resnet50_8xb8_cars.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/resnet/resnet101_b16x8_cifar10.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet101_8xb16_cifar10.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/resnet/resnet101_b16x8_cifar10.py
rename to openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet101_8xb16_cifar10.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/resnet/resnet101_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet101_8xb32_in1k.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/resnet/resnet101_b32x8_imagenet.py
rename to openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet101_8xb32_in1k.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet101_b16x8_cifar10.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet101_b16x8_cifar10.py
new file mode 100644
index 00000000..57758f2d
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet101_b16x8_cifar10.py
@@ -0,0 +1,6 @@
+_base_ = 'resnet101_8xb16_cifar10.py'
+
+_deprecation_ = dict(
+ expected='resnet101_8xb16_cifar10.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet101_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet101_b32x8_imagenet.py
new file mode 100644
index 00000000..8d45adc3
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet101_b32x8_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'resnet101_8xb32_in1k.py'
+
+_deprecation_ = dict(
+ expected='resnet101_8xb32_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/resnet/resnet152_b16x8_cifar10.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet152_8xb16_cifar10.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/resnet/resnet152_b16x8_cifar10.py
rename to openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet152_8xb16_cifar10.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/resnet/resnet152_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet152_8xb32_in1k.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/resnet/resnet152_b32x8_imagenet.py
rename to openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet152_8xb32_in1k.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet152_b16x8_cifar10.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet152_b16x8_cifar10.py
new file mode 100644
index 00000000..5c76cac6
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet152_b16x8_cifar10.py
@@ -0,0 +1,6 @@
+_base_ = 'resnet152_8xb16_cifar10.py'
+
+_deprecation_ = dict(
+ expected='resnet152_8xb16_cifar10.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet152_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet152_b32x8_imagenet.py
new file mode 100644
index 00000000..133638a4
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet152_b32x8_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'resnet152_8xb32_in1k.py'
+
+_deprecation_ = dict(
+ expected='resnet152_8xb32_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/resnet/resnet18_b16x8_cifar10.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet18_8xb16_cifar10.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/resnet/resnet18_b16x8_cifar10.py
rename to openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet18_8xb16_cifar10.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/resnet/resnet18_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet18_8xb32_in1k.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/resnet/resnet18_b32x8_imagenet.py
rename to openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet18_8xb32_in1k.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet18_b16x8_cifar10.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet18_b16x8_cifar10.py
new file mode 100644
index 00000000..5a25a0e4
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet18_b16x8_cifar10.py
@@ -0,0 +1,6 @@
+_base_ = 'resnet18_8xb16_cifar10.py'
+
+_deprecation_ = dict(
+ expected='resnet18_8xb16_cifar10.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet18_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet18_b32x8_imagenet.py
new file mode 100644
index 00000000..e6d08f60
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet18_b32x8_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'resnet18_8xb32_in1k.py'
+
+_deprecation_ = dict(
+ expected='resnet18_8xb32_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/resnet/resnet34_b16x8_cifar10.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet34_8xb16_cifar10.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/resnet/resnet34_b16x8_cifar10.py
rename to openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet34_8xb16_cifar10.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/resnet/resnet34_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet34_8xb32_in1k.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/resnet/resnet34_b32x8_imagenet.py
rename to openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet34_8xb32_in1k.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet34_b16x8_cifar10.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet34_b16x8_cifar10.py
new file mode 100644
index 00000000..eec98b2a
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet34_b16x8_cifar10.py
@@ -0,0 +1,6 @@
+_base_ = 'resnet34_8xb16_cifar10.py'
+
+_deprecation_ = dict(
+ expected='resnet34_8xb16_cifar10.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet34_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet34_b32x8_imagenet.py
new file mode 100644
index 00000000..144613a3
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet34_b32x8_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'resnet34_8xb32_in1k.py'
+
+_deprecation_ = dict(
+ expected='resnet34_8xb32_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/resnet/resnet50_b64x32_warmup_coslr_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_32xb64-warmup-coslr_in1k.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/resnet/resnet50_b64x32_warmup_coslr_imagenet.py
rename to openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_32xb64-warmup-coslr_in1k.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_32xb64-warmup-lbs_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_32xb64-warmup-lbs_in1k.py
new file mode 100644
index 00000000..2f24f9a0
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_32xb64-warmup-lbs_in1k.py
@@ -0,0 +1,12 @@
+_base_ = ['./resnet50_32xb64-warmup_in1k.py']
+model = dict(
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2048,
+ loss=dict(
+ type='LabelSmoothLoss',
+ loss_weight=1.0,
+ label_smooth_val=0.1,
+ num_classes=1000),
+ ))
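+
+# Note: in LabelSmoothLoss's default 'original' mode, label_smooth_val=0.1
+# trains the head against (1 - 0.1) * one_hot + 0.1 / num_classes rather
+# than the hard one-hot target.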
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/resnet/resnet50_b64x32_warmup_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_32xb64-warmup_in1k.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/resnet/resnet50_b64x32_warmup_imagenet.py
rename to openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_32xb64-warmup_in1k.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb128_coslr-90e_in21k.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb128_coslr-90e_in21k.py
new file mode 100644
index 00000000..8cc79211
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb128_coslr-90e_in21k.py
@@ -0,0 +1,11 @@
+_base_ = [
+ '../_base_/models/resnet50.py', '../_base_/datasets/imagenet21k_bs128.py',
+ '../_base_/schedules/imagenet_bs1024_coslr.py',
+ '../_base_/default_runtime.py'
+]
+
+# model settings
+model = dict(head=dict(num_classes=21843))
+
+# runtime settings
+runner = dict(type='EpochBasedRunner', max_epochs=90)
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/resnet/resnet50_b16x8_cifar10_mixup.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb16-mixup_cifar10.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/resnet/resnet50_b16x8_cifar10_mixup.py
rename to openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb16-mixup_cifar10.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/resnet/resnet50_b16x8_cifar10.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb16_cifar10.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/resnet/resnet50_b16x8_cifar10.py
rename to openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb16_cifar10.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/resnet/resnet50_b16x8_cifar100.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb16_cifar100.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/resnet/resnet50_b16x8_cifar100.py
rename to openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb16_cifar100.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb256-rsb-a1-600e_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb256-rsb-a1-600e_in1k.py
new file mode 100644
index 00000000..192776fc
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb256-rsb-a1-600e_in1k.py
@@ -0,0 +1,33 @@
+_base_ = [
+ '../_base_/models/resnet50.py',
+ '../_base_/datasets/imagenet_bs256_rsb_a12.py',
+ '../_base_/schedules/imagenet_bs2048_rsb.py',
+ '../_base_/default_runtime.py'
+]
+
+# Model settings
+model = dict(
+ backbone=dict(
+ norm_cfg=dict(type='SyncBN', requires_grad=True),
+ drop_path_rate=0.05,
+ ),
+ head=dict(
+ loss=dict(
+ type='LabelSmoothLoss',
+ label_smooth_val=0.1,
+ mode='original',
+ )),
+ train_cfg=dict(augments=[
+ dict(type='BatchMixup', alpha=0.2, num_classes=1000, prob=0.5),
+ dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
+ ]))
+
+# Dataset settings
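+# RepeatAugSampler puts several differently-augmented copies of the same image
+# into one batch (repeated augmentation, part of the RSB training recipe).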
+sampler = dict(type='RepeatAugSampler')
+
+# Schedule settings
+runner = dict(max_epochs=600)
+optimizer = dict(
+ weight_decay=0.01,
+ paramwise_cfg=dict(bias_decay_mult=0., norm_decay_mult=0.),
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb256-rsb-a2-300e_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb256-rsb-a2-300e_in1k.py
new file mode 100644
index 00000000..fcdc880e
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb256-rsb-a2-300e_in1k.py
@@ -0,0 +1,25 @@
+_base_ = [
+ '../_base_/models/resnet50.py',
+ '../_base_/datasets/imagenet_bs256_rsb_a12.py',
+ '../_base_/schedules/imagenet_bs2048_rsb.py',
+ '../_base_/default_runtime.py'
+]
+
+# Model settings
+model = dict(
+ backbone=dict(
+ norm_cfg=dict(type='SyncBN', requires_grad=True),
+ drop_path_rate=0.05,
+ ),
+ head=dict(loss=dict(use_sigmoid=True)),
+ train_cfg=dict(augments=[
+ dict(type='BatchMixup', alpha=0.1, num_classes=1000, prob=0.5),
+ dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
+ ]))
+
+# Dataset settings
+sampler = dict(type='RepeatAugSampler')
+
+# Schedule settings
+runner = dict(max_epochs=300)
+optimizer = dict(paramwise_cfg=dict(bias_decay_mult=0., norm_decay_mult=0.))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb256-rsb-a3-100e_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb256-rsb-a3-100e_in1k.py
new file mode 100644
index 00000000..4ff52ac8
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb256-rsb-a3-100e_in1k.py
@@ -0,0 +1,19 @@
+_base_ = [
+ '../_base_/models/resnet50.py',
+ '../_base_/datasets/imagenet_bs256_rsb_a3.py',
+ '../_base_/schedules/imagenet_bs2048_rsb.py',
+ '../_base_/default_runtime.py'
+]
+
+# Model settings
+model = dict(
+ backbone=dict(norm_cfg=dict(type='SyncBN', requires_grad=True)),
+ head=dict(loss=dict(use_sigmoid=True)),
+ train_cfg=dict(augments=[
+ dict(type='BatchMixup', alpha=0.1, num_classes=1000, prob=0.5),
+ dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
+ ]))
+
+# Schedule settings
+optimizer = dict(
+ lr=0.008, paramwise_cfg=dict(bias_decay_mult=0., norm_decay_mult=0.))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb32-coslr-preciseBN_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb32-coslr-preciseBN_in1k.py
new file mode 100644
index 00000000..dab82c6e
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb32-coslr-preciseBN_in1k.py
@@ -0,0 +1,12 @@
+_base_ = 'resnet50_8xb32-coslr_in1k.py'
+
+# The precise BN hook updates the BN statistics, so it must be executed before
+# CheckpointHook, whose priority is 'NORMAL'. Therefore the priority of
+# PreciseBNHook is set to 'ABOVE_NORMAL' here.
+custom_hooks = [
+ dict(
+ type='PreciseBNHook',
+ num_samples=8192,
+ interval=1,
+ priority='ABOVE_NORMAL')
+]
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/resnet/resnet50_b32x8_coslr_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb32-coslr_in1k.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/resnet/resnet50_b32x8_coslr_imagenet.py
rename to openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb32-coslr_in1k.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/resnet/resnet50_b32x8_cutmix_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb32-cutmix_in1k.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/resnet/resnet50_b32x8_cutmix_imagenet.py
rename to openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb32-cutmix_in1k.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb32-fp16-dynamic_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb32-fp16-dynamic_in1k.py
new file mode 100644
index 00000000..7a6c93c3
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb32-fp16-dynamic_in1k.py
@@ -0,0 +1,4 @@
+_base_ = ['./resnet50_8xb32_in1k.py']
+
+# fp16 settings
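+# 'dynamic' enables dynamic loss scaling: the scale factor is raised while
+# gradients stay finite and lowered on overflow, instead of a fixed value.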
+fp16 = dict(loss_scale='dynamic')
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb32-fp16_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb32-fp16_in1k.py
new file mode 100644
index 00000000..4245d198
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb32-fp16_in1k.py
@@ -0,0 +1,4 @@
+_base_ = ['./resnet50_8xb32_in1k.py']
+
+# fp16 settings
+fp16 = dict(loss_scale=512.)
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/resnet/resnet50_b32x8_label_smooth_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb32-lbs_in1k.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/resnet/resnet50_b32x8_label_smooth_imagenet.py
rename to openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb32-lbs_in1k.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/resnet/resnet50_b32x8_mixup_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb32-mixup_in1k.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/resnet/resnet50_b32x8_mixup_imagenet.py
rename to openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb32-mixup_in1k.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb32_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb32_in1k.py
new file mode 100644
index 00000000..b3cf5869
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb32_in1k.py
@@ -0,0 +1,6 @@
+_base_ = [
+ '../_base_/models/resnet50.py', '../_base_/datasets/imagenet_bs32.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
+
+fp16 = dict(loss_scale=512.)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb8_cars.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb8_cars.py
new file mode 100644
index 00000000..2d2db45d
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb8_cars.py
@@ -0,0 +1,19 @@
+_base_ = [
+ '../_base_/models/resnet50.py',
+ '../_base_/datasets/stanford_cars_bs8_448.py',
+ '../_base_/schedules/stanford_cars_bs8.py', '../_base_/default_runtime.py'
+]
+
+# use pre-trained weights converted from https://github.com/Alibaba-MIIL/ImageNet21K # noqa
+checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_3rdparty-mill_in21k_20220331-faac000b.pth' # noqa
+
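+# `prefix='backbone'` loads only the checkpoint keys under 'backbone.' into
+# the backbone, so the classification head is trained from scratch.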
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ init_cfg=dict(
+ type='Pretrained', checkpoint=checkpoint, prefix='backbone')),
+ head=dict(num_classes=196, ))
+
+log_config = dict(interval=50)
+checkpoint_config = dict(
+ interval=1, max_keep_ckpts=3) # save last three checkpoints
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb8_cub.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb8_cub.py
new file mode 100644
index 00000000..dffb076c
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_8xb8_cub.py
@@ -0,0 +1,19 @@
+_base_ = [
+ '../_base_/models/resnet50.py', '../_base_/datasets/cub_bs8_448.py',
+ '../_base_/schedules/cub_bs64.py', '../_base_/default_runtime.py'
+]
+
+# use pre-trained weights converted from https://github.com/Alibaba-MIIL/ImageNet21K # noqa
+checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_3rdparty-mill_in21k_20220331-faac000b.pth' # noqa
+
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ init_cfg=dict(
+ type='Pretrained', checkpoint=checkpoint, prefix='backbone')),
+ head=dict(num_classes=200, ))
+
+log_config = dict(interval=20) # log every 20 iterations
+
+checkpoint_config = dict(
+ interval=1, max_keep_ckpts=3) # save last three checkpoints
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_b16x8_cifar10.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_b16x8_cifar10.py
new file mode 100644
index 00000000..e40d1ee3
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_b16x8_cifar10.py
@@ -0,0 +1,6 @@
+_base_ = 'resnet50_8xb16_cifar10.py'
+
+_deprecation_ = dict(
+ expected='resnet50_8xb16_cifar10.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_b16x8_cifar100.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_b16x8_cifar100.py
new file mode 100644
index 00000000..b49b6f45
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_b16x8_cifar100.py
@@ -0,0 +1,6 @@
+_base_ = 'resnet50_8xb16_cifar100.py'
+
+_deprecation_ = dict(
+ expected='resnet50_8xb16_cifar100.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_b16x8_cifar10_mixup.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_b16x8_cifar10_mixup.py
new file mode 100644
index 00000000..409a40e9
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_b16x8_cifar10_mixup.py
@@ -0,0 +1,6 @@
+_base_ = 'resnet50_8xb16-mixup_cifar10.py'
+
+_deprecation_ = dict(
+ expected='resnet50_8xb16-mixup_cifar10.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_b32x8_coslr_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_b32x8_coslr_imagenet.py
new file mode 100644
index 00000000..647153b4
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_b32x8_coslr_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'resnet50_8xb32-coslr_in1k.py'
+
+_deprecation_ = dict(
+ expected='resnet50_8xb32-coslr_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_b32x8_cutmix_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_b32x8_cutmix_imagenet.py
new file mode 100644
index 00000000..87b27d5a
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_b32x8_cutmix_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'resnet50_8xb32-cutmix_in1k.py'
+
+_deprecation_ = dict(
+ expected='resnet50_8xb32-cutmix_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_b32x8_imagenet.py
new file mode 100644
index 00000000..7d7f69ec
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_b32x8_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'resnet50_8xb32_in1k.py'
+
+_deprecation_ = dict(
+ expected='resnet50_8xb32_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_b32x8_label_smooth_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_b32x8_label_smooth_imagenet.py
new file mode 100644
index 00000000..6e874155
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_b32x8_label_smooth_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'resnet50_8xb32-lbs_in1k.py'
+
+_deprecation_ = dict(
+ expected='resnet50_8xb32-lbs_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_b32x8_mixup_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_b32x8_mixup_imagenet.py
new file mode 100644
index 00000000..3405319d
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_b32x8_mixup_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'resnet50_8xb32-mixup_in1k.py'
+
+_deprecation_ = dict(
+ expected='resnet50_8xb32-mixup_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_b64x32_warmup_coslr_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_b64x32_warmup_coslr_imagenet.py
new file mode 100644
index 00000000..4724616c
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_b64x32_warmup_coslr_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'resnet50_32xb64-warmup-coslr_in1k.py'
+
+_deprecation_ = dict(
+ expected='resnet50_32xb64-warmup-coslr_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_b64x32_warmup_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_b64x32_warmup_imagenet.py
new file mode 100644
index 00000000..3e350541
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_b64x32_warmup_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'resnet50_32xb64-warmup_in1k.py'
+
+_deprecation_ = dict(
+ expected='resnet50_32xb64-warmup_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_b64x32_warmup_label_smooth_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_b64x32_warmup_label_smooth_imagenet.py
new file mode 100644
index 00000000..2544e33f
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnet50_b64x32_warmup_label_smooth_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'resnet50_32xb64-warmup-lbs_in1k.py'
+
+_deprecation_ = dict(
+ expected='resnet50_32xb64-warmup-lbs_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnetv1c101_8xb32_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnetv1c101_8xb32_in1k.py
new file mode 100644
index 00000000..441aff59
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnetv1c101_8xb32_in1k.py
@@ -0,0 +1,7 @@
+_base_ = [
+ '../_base_/models/resnetv1c50.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
+
+model = dict(backbone=dict(depth=101))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnetv1c152_8xb32_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnetv1c152_8xb32_in1k.py
new file mode 100644
index 00000000..b9f466f8
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnetv1c152_8xb32_in1k.py
@@ -0,0 +1,7 @@
+_base_ = [
+ '../_base_/models/resnetv1c50.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
+
+model = dict(backbone=dict(depth=152))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnetv1c50_8xb32_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnetv1c50_8xb32_in1k.py
new file mode 100644
index 00000000..aa1c8b64
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnetv1c50_8xb32_in1k.py
@@ -0,0 +1,5 @@
+_base_ = [
+ '../_base_/models/resnetv1c50.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/resnet/resnetv1d101_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnetv1d101_8xb32_in1k.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/resnet/resnetv1d101_b32x8_imagenet.py
rename to openmmlab_test/mmclassification-0.24.1/configs/resnet/resnetv1d101_8xb32_in1k.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnetv1d101_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnetv1d101_b32x8_imagenet.py
new file mode 100644
index 00000000..e736937e
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnetv1d101_b32x8_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'resnetv1d101_8xb32_in1k.py'
+
+_deprecation_ = dict(
+ expected='resnetv1d101_8xb32_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/resnet/resnetv1d152_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnetv1d152_8xb32_in1k.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/resnet/resnetv1d152_b32x8_imagenet.py
rename to openmmlab_test/mmclassification-0.24.1/configs/resnet/resnetv1d152_8xb32_in1k.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnetv1d152_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnetv1d152_b32x8_imagenet.py
new file mode 100644
index 00000000..88e5b9f0
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnetv1d152_b32x8_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'resnetv1d152_8xb32_in1k.py'
+
+_deprecation_ = dict(
+ expected='resnetv1d152_8xb32_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/resnet/resnetv1d50_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnetv1d50_8xb32_in1k.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/resnet/resnetv1d50_b32x8_imagenet.py
rename to openmmlab_test/mmclassification-0.24.1/configs/resnet/resnetv1d50_8xb32_in1k.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnetv1d50_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnetv1d50_b32x8_imagenet.py
new file mode 100644
index 00000000..5455e055
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnet/resnetv1d50_b32x8_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'resnetv1d50_8xb32_in1k.py'
+
+_deprecation_ = dict(
+ expected='resnetv1d50_8xb32_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnext/README.md b/openmmlab_test/mmclassification-0.24.1/configs/resnext/README.md
new file mode 100644
index 00000000..56df277e
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnext/README.md
@@ -0,0 +1,36 @@
+# ResNeXt
+
+> [Aggregated Residual Transformations for Deep Neural Networks](https://openaccess.thecvf.com/content_cvpr_2017/html/Xie_Aggregated_Residual_Transformations_CVPR_2017_paper.html)
+
+
+
+## Abstract
+
+We present a simple, highly modularized network architecture for image classification. Our network is constructed by repeating a building block that aggregates a set of transformations with the same topology. Our simple design results in a homogeneous, multi-branch architecture that has only a few hyper-parameters to set. This strategy exposes a new dimension, which we call "cardinality" (the size of the set of transformations), as an essential factor in addition to the dimensions of depth and width. On the ImageNet-1K dataset, we empirically show that even under the restricted condition of maintaining complexity, increasing cardinality is able to improve classification accuracy. Moreover, increasing cardinality is more effective than going deeper or wider when we increase the capacity. Our models, named ResNeXt, are the foundations of our entry to the ILSVRC 2016 classification task in which we secured 2nd place. We further investigate ResNeXt on an ImageNet-5K set and the COCO detection set, also showing better results than its ResNet counterpart. The code and models are publicly available online.
+
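+For intuition, here is a minimal PyTorch sketch of an aggregated-transformation
+bottleneck (illustrative only, not the mmcls `ResNeXt` backbone; the channel
+sizes follow the paper's ResNeXt-50 32x4d first stage, i.e. cardinality 32 with
+4 channels per path):
+
+```python
+import torch
+import torch.nn as nn
+
+class AggregatedBottleneck(nn.Module):
+    """Residual block whose 3x3 conv is split into `cardinality` paths."""
+
+    def __init__(self, channels=256, bottleneck=128, cardinality=32):
+        super().__init__()
+        self.block = nn.Sequential(
+            nn.Conv2d(channels, bottleneck, 1, bias=False),
+            nn.BatchNorm2d(bottleneck),
+            nn.ReLU(inplace=True),
+            # groups=cardinality realizes the 32 parallel transformations
+            nn.Conv2d(bottleneck, bottleneck, 3, padding=1,
+                      groups=cardinality, bias=False),
+            nn.BatchNorm2d(bottleneck),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(bottleneck, channels, 1, bias=False),
+            nn.BatchNorm2d(channels),
+        )
+        self.relu = nn.ReLU(inplace=True)
+
+    def forward(self, x):
+        return self.relu(x + self.block(x))
+
+x = torch.randn(1, 256, 56, 56)
+print(AggregatedBottleneck()(x).shape)  # torch.Size([1, 256, 56, 56])
+```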
+
+

+
+
+## Results and models
+
+### ImageNet-1k
+
+| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+| :---------------: | :-------: | :------: | :-------: | :-------: | :-----------------------------------------------------------------------: | :-------------------------------------------------------------------------: |
+| ResNeXt-32x4d-50 | 25.03 | 4.27 | 77.90 | 93.66 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnext/resnext50-32x4d_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnext/resnext50_32x4d_b32x8_imagenet_20210429-56066e27.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnext/resnext50_32x4d_b32x8_imagenet_20210429-56066e27.log.json) |
+| ResNeXt-32x4d-101 | 44.18 | 8.03 | 78.61 | 94.17 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnext/resnext101-32x4d_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x4d_b32x8_imagenet_20210506-e0fa3dd5.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x4d_b32x8_imagenet_20210506-e0fa3dd5.log.json) |
+| ResNeXt-32x8d-101 | 88.79 | 16.5 | 79.27 | 94.58 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnext/resnext101-32x8d_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x8d_b32x8_imagenet_20210506-23a247d5.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x8d_b32x8_imagenet_20210506-23a247d5.log.json) |
+| ResNeXt-32x4d-152 | 59.95 | 11.8 | 78.88 | 94.33 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnext/resnext152-32x4d_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnext/resnext152_32x4d_b32x8_imagenet_20210524-927787be.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnext/resnext152_32x4d_b32x8_imagenet_20210524-927787be.log.json) |
+
+## Citation
+
+```
+@inproceedings{xie2017aggregated,
+ title={Aggregated residual transformations for deep neural networks},
+ author={Xie, Saining and Girshick, Ross and Doll{\'a}r, Piotr and Tu, Zhuowen and He, Kaiming},
+ booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition},
+ pages={1492--1500},
+ year={2017}
+}
+```
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnext/metafile.yml b/openmmlab_test/mmclassification-0.24.1/configs/resnext/metafile.yml
new file mode 100644
index 00000000..c68e7f9d
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnext/metafile.yml
@@ -0,0 +1,73 @@
+Collections:
+ - Name: ResNeXt
+ Metadata:
+ Training Data: ImageNet-1k
+ Training Techniques:
+ - SGD with Momentum
+ - Weight Decay
+ Training Resources: 8x V100 GPUs
+ Epochs: 100
+ Batch Size: 256
+ Architecture:
+ - ResNeXt
+ Paper:
+ URL: https://openaccess.thecvf.com/content_cvpr_2017/html/Xie_Aggregated_Residual_Transformations_CVPR_2017_paper.html
+ Title: "Aggregated Residual Transformations for Deep Neural Networks"
+ README: configs/resnext/README.md
+ Code:
+ URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/resnext.py#L90
+ Version: v0.15.0
+
+Models:
+ - Name: resnext50-32x4d_8xb32_in1k
+ Metadata:
+ FLOPs: 4270000000
+ Parameters: 25030000
+ In Collection: ResNeXt
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 77.90
+ Top 5 Accuracy: 93.66
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/resnext/resnext50_32x4d_b32x8_imagenet_20210429-56066e27.pth
+ Config: configs/resnext/resnext50-32x4d_8xb32_in1k.py
+ - Name: resnext101-32x4d_8xb32_in1k
+ Metadata:
+ FLOPs: 8030000000
+ Parameters: 44180000
+ In Collection: ResNeXt
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 78.61
+ Top 5 Accuracy: 94.17
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x4d_b32x8_imagenet_20210506-e0fa3dd5.pth
+ Config: configs/resnext/resnext101-32x4d_8xb32_in1k.py
+ - Name: resnext101-32x8d_8xb32_in1k
+ Metadata:
+ FLOPs: 16500000000
+ Parameters: 88790000
+ In Collection: ResNeXt
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 79.27
+ Top 5 Accuracy: 94.58
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x8d_b32x8_imagenet_20210506-23a247d5.pth
+ Config: configs/resnext/resnext101-32x8d_8xb32_in1k.py
+ - Name: resnext152-32x4d_8xb32_in1k
+ Metadata:
+ FLOPs: 11800000000
+ Parameters: 59950000
+ In Collection: ResNeXt
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 78.88
+ Top 5 Accuracy: 94.33
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/resnext/resnext152_32x4d_b32x8_imagenet_20210524-927787be.pth
+ Config: configs/resnext/resnext152-32x4d_8xb32_in1k.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/resnext/resnext101_32x4d_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/resnext/resnext101-32x4d_8xb32_in1k.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/resnext/resnext101_32x4d_b32x8_imagenet.py
rename to openmmlab_test/mmclassification-0.24.1/configs/resnext/resnext101-32x4d_8xb32_in1k.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/resnext/resnext101_32x8d_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/resnext/resnext101-32x8d_8xb32_in1k.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/resnext/resnext101_32x8d_b32x8_imagenet.py
rename to openmmlab_test/mmclassification-0.24.1/configs/resnext/resnext101-32x8d_8xb32_in1k.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnext/resnext101_32x4d_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/resnext/resnext101_32x4d_b32x8_imagenet.py
new file mode 100644
index 00000000..07d66c35
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnext/resnext101_32x4d_b32x8_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'resnext101-32x4d_8xb32_in1k.py'
+
+_deprecation_ = dict(
+ expected='resnext101-32x4d_8xb32_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnext/resnext101_32x8d_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/resnext/resnext101_32x8d_b32x8_imagenet.py
new file mode 100644
index 00000000..071ca60f
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnext/resnext101_32x8d_b32x8_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'resnext101-32x8d_8xb32_in1k.py'
+
+_deprecation_ = dict(
+ expected='resnext101-32x8d_8xb32_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/resnext/resnext152_32x4d_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/resnext/resnext152-32x4d_8xb32_in1k.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/resnext/resnext152_32x4d_b32x8_imagenet.py
rename to openmmlab_test/mmclassification-0.24.1/configs/resnext/resnext152-32x4d_8xb32_in1k.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnext/resnext152_32x4d_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/resnext/resnext152_32x4d_b32x8_imagenet.py
new file mode 100644
index 00000000..6d05c8b3
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnext/resnext152_32x4d_b32x8_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'resnext152-32x4d_8xb32_in1k.py'
+
+_deprecation_ = dict(
+ expected='resnext152-32x4d_8xb32_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/resnext/resnext50_32x4d_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/resnext/resnext50-32x4d_8xb32_in1k.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/resnext/resnext50_32x4d_b32x8_imagenet.py
rename to openmmlab_test/mmclassification-0.24.1/configs/resnext/resnext50-32x4d_8xb32_in1k.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/resnext/resnext50_32x4d_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/resnext/resnext50_32x4d_b32x8_imagenet.py
new file mode 100644
index 00000000..92ae0639
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/resnext/resnext50_32x4d_b32x8_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'resnext50-32x4d_8xb32_in1k.py'
+
+_deprecation_ = dict(
+ expected='resnext50-32x4d_8xb32_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/seresnet/README.md b/openmmlab_test/mmclassification-0.24.1/configs/seresnet/README.md
new file mode 100644
index 00000000..ccfd1d15
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/seresnet/README.md
@@ -0,0 +1,34 @@
+# SE-ResNet
+
+> [Squeeze-and-Excitation Networks](https://openaccess.thecvf.com/content_cvpr_2018/html/Hu_Squeeze-and-Excitation_Networks_CVPR_2018_paper.html)
+
+
+
+## Abstract
+
+The central building block of convolutional neural networks (CNNs) is the convolution operator, which enables networks to construct informative features by fusing both spatial and channel-wise information within local receptive fields at each layer. A broad range of prior research has investigated the spatial component of this relationship, seeking to strengthen the representational power of a CNN by enhancing the quality of spatial encodings throughout its feature hierarchy. In this work, we focus instead on the channel relationship and propose a novel architectural unit, which we term the "Squeeze-and-Excitation" (SE) block, that adaptively recalibrates channel-wise feature responses by explicitly modelling interdependencies between channels. We show that these blocks can be stacked together to form SENet architectures that generalise extremely effectively across different datasets. We further demonstrate that SE blocks bring significant improvements in performance for existing state-of-the-art CNNs at slight additional computational cost. Squeeze-and-Excitation Networks formed the foundation of our ILSVRC 2017 classification submission which won first place and reduced the top-5 error to 2.251%, surpassing the winning entry of 2016 by a relative improvement of ~25%.
+
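+For intuition, here is a minimal PyTorch sketch of an SE block (illustrative
+only, not the mmcls implementation; `reduction=16` is the paper's default):
+
+```python
+import torch
+import torch.nn as nn
+
+class SEBlock(nn.Module):
+    """Squeeze (global pooling) then excitation (per-channel gating)."""
+
+    def __init__(self, channels, reduction=16):
+        super().__init__()
+        self.fc = nn.Sequential(
+            nn.Linear(channels, channels // reduction),
+            nn.ReLU(inplace=True),
+            nn.Linear(channels // reduction, channels),
+            nn.Sigmoid(),
+        )
+
+    def forward(self, x):
+        n, c, _, _ = x.shape
+        squeezed = x.mean(dim=(2, 3))               # squeeze: one value per channel
+        gates = self.fc(squeezed).view(n, c, 1, 1)  # excitation: gates in (0, 1)
+        return x * gates                            # recalibrate channel responses
+
+x = torch.randn(2, 64, 32, 32)
+print(SEBlock(64)(x).shape)  # torch.Size([2, 64, 32, 32])
+```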
+
+

+
+
+## Results and models
+
+### ImageNet-1k
+
+| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+| :-----------: | :-------: | :------: | :-------: | :-------: | :-------------------------------------------------------------------------: | :---------------------------------------------------------------------------: |
+| SE-ResNet-50 | 28.09 | 4.13 | 77.74 | 93.84 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/seresnet/seresnet50_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet50_batch256_imagenet_20200804-ae206104.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet50_batch256_imagenet_20200708-657b3c36.log.json) |
+| SE-ResNet-101 | 49.33 | 7.86 | 78.26 | 94.07 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/seresnet/seresnet101_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet101_batch256_imagenet_20200804-ba5b51d4.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet101_batch256_imagenet_20200708-038a4d04.log.json) |
+
+## Citation
+
+```
+@inproceedings{hu2018squeeze,
+ title={Squeeze-and-excitation networks},
+ author={Hu, Jie and Shen, Li and Sun, Gang},
+ booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition},
+ pages={7132--7141},
+ year={2018}
+}
+```
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/seresnet/metafile.yml b/openmmlab_test/mmclassification-0.24.1/configs/seresnet/metafile.yml
new file mode 100644
index 00000000..7d2a3810
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/seresnet/metafile.yml
@@ -0,0 +1,47 @@
+Collections:
+ - Name: SEResNet
+ Metadata:
+ Training Data: ImageNet-1k
+ Training Techniques:
+ - SGD with Momentum
+ - Weight Decay
+ Training Resources: 8x V100 GPUs
+ Epochs: 140
+ Batch Size: 256
+ Architecture:
+ - ResNet
+ Paper:
+ URL: https://openaccess.thecvf.com/content_cvpr_2018/html/Hu_Squeeze-and-Excitation_Networks_CVPR_2018_paper.html
+ Title: "Squeeze-and-Excitation Networks"
+ README: configs/seresnet/README.md
+ Code:
+ URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/seresnet.py#L58
+ Version: v0.15.0
+
+Models:
+ - Name: seresnet50_8xb32_in1k
+ Metadata:
+ FLOPs: 4130000000
+ Parameters: 28090000
+ In Collection: SEResNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 77.74
+ Top 5 Accuracy: 93.84
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet50_batch256_imagenet_20200804-ae206104.pth
+ Config: configs/seresnet/seresnet50_8xb32_in1k.py
+ - Name: seresnet101_8xb32_in1k
+ Metadata:
+ FLOPs: 7860000000
+ Parameters: 49330000
+ In Collection: SEResNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 78.26
+ Top 5 Accuracy: 94.07
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet101_batch256_imagenet_20200804-ba5b51d4.pth
+ Config: configs/seresnet/seresnet101_8xb32_in1k.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/seresnet/seresnet101_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/seresnet/seresnet101_8xb32_in1k.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/seresnet/seresnet101_b32x8_imagenet.py
rename to openmmlab_test/mmclassification-0.24.1/configs/seresnet/seresnet101_8xb32_in1k.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/seresnet/seresnet101_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/seresnet/seresnet101_b32x8_imagenet.py
new file mode 100644
index 00000000..46daa09a
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/seresnet/seresnet101_b32x8_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'seresnet101_8xb32_in1k.py'
+
+_deprecation_ = dict(
+ expected='seresnet101_8xb32_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/seresnet/seresnet50_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/seresnet/seresnet50_8xb32_in1k.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/seresnet/seresnet50_b32x8_imagenet.py
rename to openmmlab_test/mmclassification-0.24.1/configs/seresnet/seresnet50_8xb32_in1k.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/seresnet/seresnet50_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/seresnet/seresnet50_b32x8_imagenet.py
new file mode 100644
index 00000000..0fb9df39
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/seresnet/seresnet50_b32x8_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'seresnet50_8xb32_in1k.py'
+
+_deprecation_ = dict(
+ expected='seresnet50_8xb32_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/seresnext/seresnext101_32x4d_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/seresnet/seresnext101-32x4d_8xb32_in1k.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/seresnext/seresnext101_32x4d_b32x8_imagenet.py
rename to openmmlab_test/mmclassification-0.24.1/configs/seresnet/seresnext101-32x4d_8xb32_in1k.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/seresnet/seresnext101_32x4d_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/seresnet/seresnext101_32x4d_b32x8_imagenet.py
new file mode 100644
index 00000000..cb99ec66
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/seresnet/seresnext101_32x4d_b32x8_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'seresnext101-32x4d_8xb32_in1k.py'
+
+_deprecation_ = dict(
+ expected='seresnext101-32x4d_8xb32_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/seresnext/seresnext50_32x4d_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/seresnet/seresnext50-32x4d_8xb32_in1k.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/seresnext/seresnext50_32x4d_b32x8_imagenet.py
rename to openmmlab_test/mmclassification-0.24.1/configs/seresnet/seresnext50-32x4d_8xb32_in1k.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/seresnet/seresnext50_32x4d_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/seresnet/seresnext50_32x4d_b32x8_imagenet.py
new file mode 100644
index 00000000..49229604
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/seresnet/seresnext50_32x4d_b32x8_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'seresnext50-32x4d_8xb32_in1k.py'
+
+_deprecation_ = dict(
+ expected='seresnext50-32x4d_8xb32_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/shufflenet_v1/README.md b/openmmlab_test/mmclassification-0.24.1/configs/shufflenet_v1/README.md
new file mode 100644
index 00000000..fd131279
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/shufflenet_v1/README.md
@@ -0,0 +1,33 @@
+# ShuffleNet V1
+
+> [ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices](https://openaccess.thecvf.com/content_cvpr_2018/html/Zhang_ShuffleNet_An_Extremely_CVPR_2018_paper.html)
+
+
+
+## Abstract
+
+We introduce an extremely computation-efficient CNN architecture named ShuffleNet, which is designed specially for mobile devices with very limited computing power (e.g., 10-150 MFLOPs). The new architecture utilizes two new operations, pointwise group convolution and channel shuffle, to greatly reduce computation cost while maintaining accuracy. Experiments on ImageNet classification and MS COCO object detection demonstrate the superior performance of ShuffleNet over other structures, e.g. lower top-1 error (absolute 7.8%) than recent MobileNet on ImageNet classification task, under the computation budget of 40 MFLOPs. On an ARM-based mobile device, ShuffleNet achieves ~13x actual speedup over AlexNet while maintaining comparable accuracy.
+
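+For intuition, here is a minimal PyTorch sketch of the channel shuffle operation
+(illustrative only, not the mmcls implementation): it is just a
+reshape-transpose-flatten, so information can flow between the groups of a
+pointwise group convolution.
+
+```python
+import torch
+
+def channel_shuffle(x, groups):
+    """Interleave channels across groups: reshape -> transpose -> flatten."""
+    n, c, h, w = x.shape
+    assert c % groups == 0
+    x = x.view(n, groups, c // groups, h, w)
+    x = x.transpose(1, 2).contiguous()
+    return x.view(n, c, h, w)
+
+x = torch.arange(6.0).view(1, 6, 1, 1)  # channels 0..5 in 3 groups of 2
+print(channel_shuffle(x, groups=3).flatten().tolist())
+# [0.0, 2.0, 4.0, 1.0, 3.0, 5.0] -- each output group mixes all input groups
+```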
+
+

+
+
+## Results and models
+
+### ImageNet-1k
+
+| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+| :-------------------------: | :-------: | :------: | :-------: | :-------: | :------------------------------------------------------------------: | :--------------------------------------------------------------------: |
+| ShuffleNetV1 1.0x (group=3) | 1.87 | 0.146 | 68.13 | 87.81 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/shufflenet_v1/shufflenet-v1-1x_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/shufflenet_v1/shufflenet_v1_batch1024_imagenet_20200804-5d6cec73.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/shufflenet_v1/shufflenet_v1_batch1024_imagenet_20200804-5d6cec73.log.json) |
+
+## Citation
+
+```
+@inproceedings{zhang2018shufflenet,
+ title={Shufflenet: An extremely efficient convolutional neural network for mobile devices},
+ author={Zhang, Xiangyu and Zhou, Xinyu and Lin, Mengxiao and Sun, Jian},
+ booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition},
+ pages={6848--6856},
+ year={2018}
+}
+```
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/shufflenet_v1/metafile.yml b/openmmlab_test/mmclassification-0.24.1/configs/shufflenet_v1/metafile.yml
new file mode 100644
index 00000000..2cfffa10
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/shufflenet_v1/metafile.yml
@@ -0,0 +1,35 @@
+Collections:
+ - Name: Shufflenet V1
+ Metadata:
+ Training Data: ImageNet-1k
+ Training Techniques:
+ - SGD with Momentum
+ - Weight Decay
+ - No BN decay
+ Training Resources: 8x 1080 GPUs
+ Epochs: 300
+ Batch Size: 1024
+ Architecture:
+ - Shufflenet V1
+ Paper:
+ URL: https://openaccess.thecvf.com/content_cvpr_2018/html/Zhang_ShuffleNet_An_Extremely_CVPR_2018_paper.html
+ Title: "ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices"
+ README: configs/shufflenet_v1/README.md
+ Code:
+ URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/shufflenet_v1.py#L152
+ Version: v0.15.0
+
+Models:
+ - Name: shufflenet-v1-1x_16xb64_in1k
+ Metadata:
+ FLOPs: 146000000
+ Parameters: 1870000
+ In Collection: Shufflenet V1
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 68.13
+ Top 5 Accuracy: 87.81
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/shufflenet_v1/shufflenet_v1_batch1024_imagenet_20200804-5d6cec73.pth
+ Config: configs/shufflenet_v1/shufflenet-v1-1x_16xb64_in1k.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/shufflenet_v1/shufflenet_v1_1x_b64x16_linearlr_bn_nowd_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/shufflenet_v1/shufflenet-v1-1x_16xb64_in1k.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/shufflenet_v1/shufflenet_v1_1x_b64x16_linearlr_bn_nowd_imagenet.py
rename to openmmlab_test/mmclassification-0.24.1/configs/shufflenet_v1/shufflenet-v1-1x_16xb64_in1k.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/shufflenet_v1/shufflenet_v1_1x_b64x16_linearlr_bn_nowd_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/shufflenet_v1/shufflenet_v1_1x_b64x16_linearlr_bn_nowd_imagenet.py
new file mode 100644
index 00000000..03121979
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/shufflenet_v1/shufflenet_v1_1x_b64x16_linearlr_bn_nowd_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'shufflenet-v1-1x_16xb64_in1k.py'
+
+_deprecation_ = dict(
+ expected='shufflenet-v1-1x_16xb64_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/shufflenet_v2/README.md b/openmmlab_test/mmclassification-0.24.1/configs/shufflenet_v2/README.md
new file mode 100644
index 00000000..78271543
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/shufflenet_v2/README.md
@@ -0,0 +1,33 @@
+# ShuffleNet V2
+
+> [ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design](https://openaccess.thecvf.com/content_ECCV_2018/papers/Ningning_Light-weight_CNN_Architecture_ECCV_2018_paper.pdf)
+
+
+
+## Abstract
+
+Currently, the neural network architecture design is mostly guided by the *indirect* metric of computation complexity, i.e., FLOPs. However, the *direct* metric, e.g., speed, also depends on other factors such as memory access cost and platform characteristics. Thus, this work proposes to evaluate the direct metric on the target platform, beyond only considering FLOPs. Based on a series of controlled experiments, this work derives several practical *guidelines* for efficient network design. Accordingly, a new architecture is presented, called *ShuffleNet V2*. Comprehensive ablation experiments verify that our model is the state-of-the-art in terms of speed and accuracy tradeoff.
+
+
+

+
+
+## Results and models
+
+### ImageNet-1k
+
+| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+| :---------------: | :-------: | :------: | :-------: | :-------: | :-----------------------------------------------------------------------: | :-------------------------------------------------------------------------: |
+| ShuffleNetV2 1.0x | 2.28 | 0.149 | 69.55 | 88.92 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/shufflenet_v2/shufflenet-v2-1x_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/shufflenet_v2/shufflenet_v2_batch1024_imagenet_20200812-5bf4721e.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/shufflenet_v2/shufflenet_v2_batch1024_imagenet_20200804-8860eec9.log.json) |
+
+## Citation
+
+```
+@inproceedings{ma2018shufflenet,
+ title={Shufflenet v2: Practical guidelines for efficient cnn architecture design},
+ author={Ma, Ningning and Zhang, Xiangyu and Zheng, Hai-Tao and Sun, Jian},
+ booktitle={Proceedings of the European conference on computer vision (ECCV)},
+ pages={116--131},
+ year={2018}
+}
+```
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/shufflenet_v2/metafile.yml b/openmmlab_test/mmclassification-0.24.1/configs/shufflenet_v2/metafile.yml
new file mode 100644
index 00000000..a06322dd
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/shufflenet_v2/metafile.yml
@@ -0,0 +1,35 @@
+Collections:
+ - Name: Shufflenet V2
+ Metadata:
+ Training Data: ImageNet-1k
+ Training Techniques:
+ - SGD with Momentum
+ - Weight Decay
+ - No BN decay
+ Training Resources: 8x 1080 GPUs
+ Epochs: 300
+ Batch Size: 1024
+ Architecture:
+ - Shufflenet V2
+ Paper:
+ URL: https://openaccess.thecvf.com/content_ECCV_2018/papers/Ningning_Light-weight_CNN_Architecture_ECCV_2018_paper.pdf
+ Title: "ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design"
+ README: configs/shufflenet_v2/README.md
+ Code:
+ URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/shufflenet_v2.py#L134
+ Version: v0.15.0
+
+Models:
+ - Name: shufflenet-v2-1x_16xb64_in1k
+ Metadata:
+ FLOPs: 149000000
+ Parameters: 2280000
+ In Collection: Shufflenet V2
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 69.55
+ Top 5 Accuracy: 88.92
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/shufflenet_v2/shufflenet_v2_batch1024_imagenet_20200812-5bf4721e.pth
+ Config: configs/shufflenet_v2/shufflenet-v2-1x_16xb64_in1k.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/shufflenet_v2/shufflenet-v2-1x_16xb64_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/shufflenet_v2/shufflenet-v2-1x_16xb64_in1k.py
new file mode 100644
index 00000000..b43bd34d
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/shufflenet_v2/shufflenet-v2-1x_16xb64_in1k.py
@@ -0,0 +1,8 @@
+_base_ = [
+ '../_base_/models/shufflenet_v2_1x.py',
+ '../_base_/datasets/imagenet_bs64_pil_resize.py',
+ '../_base_/schedules/imagenet_bs1024_linearlr_bn_nowd.py',
+ '../_base_/default_runtime.py'
+]
+
+fp16 = dict(loss_scale=512.)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/shufflenet_v2/shufflenet_v2_1x_b64x16_linearlr_bn_nowd_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/shufflenet_v2/shufflenet_v2_1x_b64x16_linearlr_bn_nowd_imagenet.py
new file mode 100644
index 00000000..c0938b09
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/shufflenet_v2/shufflenet_v2_1x_b64x16_linearlr_bn_nowd_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'shufflenet-v2-1x_16xb64_in1k.py'
+
+_deprecation_ = dict(
+ expected='shufflenet-v2-1x_16xb64_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/README.md b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/README.md
new file mode 100644
index 00000000..86975ec8
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/README.md
@@ -0,0 +1,60 @@
+# Swin Transformer
+
+> [Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/pdf/2103.14030.pdf)
+
+
+
+## Abstract
+
+This paper presents a new vision Transformer, called Swin Transformer, that capably serves as a general-purpose backbone for computer vision. Challenges in adapting Transformer from language to vision arise from differences between the two domains, such as large variations in the scale of visual entities and the high resolution of pixels in images compared to words in text. To address these differences, we propose a hierarchical Transformer whose representation is computed with **S**hifted **win**dows. The shifted windowing scheme brings greater efficiency by limiting self-attention computation to non-overlapping local windows while also allowing for cross-window connection. This hierarchical architecture has the flexibility to model at various scales and has linear computational complexity with respect to image size. These qualities of Swin Transformer make it compatible with a broad range of vision tasks, including image classification (87.3 top-1 accuracy on ImageNet-1K) and dense prediction tasks such as object detection (58.7 box AP and 51.1 mask AP on COCO test-dev) and semantic segmentation (53.5 mIoU on ADE20K val). Its performance surpasses the previous state-of-the-art by a large margin of +2.7 box AP and +2.6 mask AP on COCO, and +3.2 mIoU on ADE20K, demonstrating the potential of Transformer-based models as vision backbones. The hierarchical design and the shifted window approach also prove beneficial for all-MLP architectures.
+
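+For intuition, here is a minimal PyTorch sketch of window partitioning, the core
+of the (shifted) window attention (illustrative only, not the mmcls
+implementation; the 56x56x96 shape corresponds to a Swin-T first stage):
+
+```python
+import torch
+
+def window_partition(x, window_size=7):
+    """Split an (N, H, W, C) map into non-overlapping window_size windows."""
+    n, h, w, c = x.shape
+    x = x.view(n, h // window_size, window_size, w // window_size, window_size, c)
+    # Self-attention is computed inside each window of window_size**2 tokens,
+    # so the cost grows linearly with image size instead of quadratically.
+    return x.permute(0, 1, 3, 2, 4, 5).reshape(-1, window_size ** 2, c)
+
+x = torch.randn(1, 56, 56, 96)
+print(window_partition(x).shape)  # torch.Size([64, 49, 96])
+
+# The next block uses *shifted* windows: roll the map by half a window first,
+# which creates cross-window connections between consecutive layers.
+shifted = torch.roll(x, shifts=(-3, -3), dims=(1, 2))
+print(window_partition(shifted).shape)  # torch.Size([64, 49, 96])
+```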
+
+

+
+
+## Results and models
+
+### ImageNet-21k
+
+The models pre-trained on ImageNet-21k are only used for fine-tuning, and therefore don't have evaluation results.
+
+| Model | resolution | Params(M) | Flops(G) | Download |
+| :----: | :--------: | :-------: | :------: | :---------------------------------------------------------------------------------------------------------------------: |
+| Swin-B | 224x224 | 86.74 | 15.14 | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin-base_3rdparty_in21k.pth) |
+| Swin-B | 384x384 | 86.88 | 44.49 | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin-base_3rdparty_in21k-384px.pth) |
+| Swin-L | 224x224 | 195.00 | 34.04 | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin-large_3rdparty_in21k.pth) |
+| Swin-L | 384x384 | 195.20 | 100.04 | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin-large_3rdparty_in21k-384px.pth) |
+
+### ImageNet-1k
+
+| Model | Pretrain | resolution | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+| :------: | :----------: | :--------: | :-------: | :------: | :-------: | :-------: | :----------------------------------------------------------------: | :-------------------------------------------------------------------: |
+| Swin-T | From scratch | 224x224 | 28.29 | 4.36 | 81.18 | 95.61 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer/swin-tiny_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_tiny_224_b16x64_300e_imagenet_20210616_090925-66df6be6.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_tiny_224_b16x64_300e_imagenet_20210616_090925.log.json) |
+| Swin-S | From scratch | 224x224 | 49.61 | 8.52 | 83.02 | 96.29 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer/swin-small_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_small_224_b16x64_300e_imagenet_20210615_110219-7f9d988b.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_small_224_b16x64_300e_imagenet_20210615_110219.log.json) |
+| Swin-B | From scratch | 224x224 | 87.77 | 15.14 | 83.36 | 96.44 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer/swin_base_224_b16x64_300e_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_base_224_b16x64_300e_imagenet_20210616_190742-93230b0d.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_base_224_b16x64_300e_imagenet_20210616_190742.log.json) |
+| Swin-S\* | From scratch | 224x224 | 49.61 | 8.52 | 83.21 | 96.25 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer/swin-small_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_small_patch4_window7_224-cc7a01c9.pth) |
+| Swin-B\* | From scratch | 224x224 | 87.77 | 15.14 | 83.42 | 96.44 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer/swin-base_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_base_patch4_window7_224-4670dd19.pth) |
+| Swin-B\* | From scratch | 384x384 | 87.90 | 44.49 | 84.49 | 96.95 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer/swin-base_16xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_base_patch4_window12_384-02c598a4.pth) |
+| Swin-B\* | ImageNet-21k | 224x224 | 87.77 | 15.14 | 85.16 | 97.50 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer/swin-base_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_base_patch4_window7_224_22kto1k-f967f799.pth) |
+| Swin-B\* | ImageNet-21k | 384x384 | 87.90 | 44.49 | 86.44 | 98.05 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer/swin-base_16xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_base_patch4_window12_384_22kto1k-d59b0d1d.pth) |
+| Swin-L\* | ImageNet-21k | 224x224 | 196.53 | 34.04 | 86.24 | 97.88 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer/swin-large_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_large_patch4_window7_224_22kto1k-5f0996db.pth) |
+| Swin-L\* | ImageNet-21k | 384x384 | 196.74 | 100.04 | 87.25 | 98.25 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer/swin-large_16xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_large_patch4_window12_384_22kto1k-0a40944b.pth) |
+
+*Models with * are converted from the [official repo](https://github.com/microsoft/Swin-Transformer#main-results-on-imagenet-with-pretrained-models). The config files of these models are for validation only; we cannot guarantee their training accuracy and welcome contributions of reproduction results.*
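+
+As a quick sanity check, a converted checkpoint can be evaluated through the Python API. The snippet below is a minimal sketch rather than part of the release; it assumes `mmcls` is installed and reuses the bundled `demo/demo.JPEG` image:
+
+```python
+from mmcls.apis import init_model, inference_model
+
+# validation-only config plus the converted Swin-B weights from the table above
+config = 'configs/swin_transformer/swin-base_16xb64_in1k.py'
+checkpoint = 'https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_base_patch4_window7_224-4670dd19.pth'
+
+model = init_model(config, checkpoint, device='cpu')
+result = inference_model(model, 'demo/demo.JPEG')
+print(result['pred_class'], result['pred_score'])
+```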
+
+### CUB-200-2011
+
+| Model | Pretrain | resolution | Params(M) | Flops(G) | Top-1 (%) | Config | Download |
+| :----: | :---------------------------------------------------: | :--------: | :-------: | :------: | :-------: | :-------------------------------------------------: | :----------------------------------------------------: |
+| Swin-L | [ImageNet-21k](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin-large_3rdparty_in21k-384px.pth) | 384x384 | 195.51 | 100.04 | 91.87 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer/swin-large_8xb8_cub_384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin-large_8xb8_cub_384px_20220307-1bbaee6a.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin-large_8xb8_cub_384px_20220307-1bbaee6a.log.json) |
+
+## Citation
+
+```
+@article{liu2021Swin,
+ title={Swin Transformer: Hierarchical Vision Transformer using Shifted Windows},
+ author={Liu, Ze and Lin, Yutong and Cao, Yue and Hu, Han and Wei, Yixuan and Zhang, Zheng and Lin, Stephen and Guo, Baining},
+ journal={arXiv preprint arXiv:2103.14030},
+ year={2021}
+}
+```
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/metafile.yml b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/metafile.yml
new file mode 100644
index 00000000..b44c1ba8
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/metafile.yml
@@ -0,0 +1,201 @@
+Collections:
+ - Name: Swin-Transformer
+ Metadata:
+ Training Data: ImageNet-1k
+ Training Techniques:
+ - AdamW
+ - Weight Decay
+ Training Resources: 16x V100 GPUs
+ Epochs: 300
+ Batch Size: 1024
+ Architecture:
+ - Shift Window Multihead Self Attention
+ Paper:
+ URL: https://arxiv.org/pdf/2103.14030.pdf
+ Title: "Swin Transformer: Hierarchical Vision Transformer using Shifted Windows"
+ README: configs/swin_transformer/README.md
+ Code:
+ URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/swin_transformer.py#L176
+ Version: v0.15.0
+
+Models:
+ - Name: swin-tiny_16xb64_in1k
+ Metadata:
+ FLOPs: 4360000000
+ Parameters: 28290000
+ In Collection: Swin-Transformer
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 81.18
+ Top 5 Accuracy: 95.61
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_tiny_224_b16x64_300e_imagenet_20210616_090925-66df6be6.pth
+ Config: configs/swin_transformer/swin-tiny_16xb64_in1k.py
+ - Name: swin-small_16xb64_in1k
+ Metadata:
+ FLOPs: 8520000000
+ Parameters: 49610000
+ In Collection: Swin-Transformer
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 83.02
+ Top 5 Accuracy: 96.29
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_small_224_b16x64_300e_imagenet_20210615_110219-7f9d988b.pth
+ Config: configs/swin_transformer/swin-small_16xb64_in1k.py
+ - Name: swin-base_16xb64_in1k
+ Metadata:
+ FLOPs: 15140000000
+ Parameters: 87770000
+ In Collection: Swin-Transformer
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 83.36
+ Top 5 Accuracy: 96.44
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_base_224_b16x64_300e_imagenet_20210616_190742-93230b0d.pth
+ Config: configs/swin_transformer/swin-base_16xb64_in1k.py
+ - Name: swin-tiny_3rdparty_in1k
+ Metadata:
+ FLOPs: 4360000000
+ Parameters: 28290000
+ In Collection: Swin-Transformer
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 81.18
+ Top 5 Accuracy: 95.52
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_tiny_patch4_window7_224-160bb0a5.pth
+ Converted From:
+ Weights: https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth
+ Code: https://github.com/microsoft/Swin-Transformer/blob/777f6c66604bb5579086c4447efe3620344d95a9/models/swin_transformer.py#L458
+ Config: configs/swin_transformer/swin-tiny_16xb64_in1k.py
+ - Name: swin-small_3rdparty_in1k
+ Metadata:
+ FLOPs: 8520000000
+ Parameters: 49610000
+ In Collection: Swin-Transformer
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 83.21
+ Top 5 Accuracy: 96.25
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_small_patch4_window7_224-cc7a01c9.pth
+ Converted From:
+ Weights: https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth
+ Code: https://github.com/microsoft/Swin-Transformer/blob/777f6c66604bb5579086c4447efe3620344d95a9/models/swin_transformer.py#L458
+ Config: configs/swin_transformer/swin-small_16xb64_in1k.py
+ - Name: swin-base_3rdparty_in1k
+ Metadata:
+ FLOPs: 15140000000
+ Parameters: 87770000
+ In Collection: Swin-Transformer
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 83.42
+ Top 5 Accuracy: 96.44
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_base_patch4_window7_224-4670dd19.pth
+ Converted From:
+ Weights: https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224.pth
+ Code: https://github.com/microsoft/Swin-Transformer/blob/777f6c66604bb5579086c4447efe3620344d95a9/models/swin_transformer.py#L458
+ Config: configs/swin_transformer/swin-base_16xb64_in1k.py
+ - Name: swin-base_3rdparty_in1k-384
+ Metadata:
+ FLOPs: 44490000000
+ Parameters: 87900000
+ In Collection: Swin-Transformer
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 84.49
+ Top 5 Accuracy: 96.95
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_base_patch4_window12_384-02c598a4.pth
+ Converted From:
+ Weights: https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384.pth
+ Code: https://github.com/microsoft/Swin-Transformer/blob/777f6c66604bb5579086c4447efe3620344d95a9/models/swin_transformer.py#L458
+ Config: configs/swin_transformer/swin-base_16xb64_in1k-384px.py
+ - Name: swin-base_in21k-pre-3rdparty_in1k
+ Metadata:
+ FLOPs: 15140000000
+ Parameters: 87770000
+ In Collection: Swin-Transformer
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 85.16
+ Top 5 Accuracy: 97.50
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_base_patch4_window7_224_22kto1k-f967f799.pth
+ Converted From:
+ Weights: https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22kto1k.pth
+ Code: https://github.com/microsoft/Swin-Transformer/blob/777f6c66604bb5579086c4447efe3620344d95a9/models/swin_transformer.py#L458
+ Config: configs/swin_transformer/swin-base_16xb64_in1k.py
+ - Name: swin-base_in21k-pre-3rdparty_in1k-384
+ Metadata:
+ FLOPs: 44490000000
+ Parameters: 87900000
+ In Collection: Swin-Transformer
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 86.44
+ Top 5 Accuracy: 98.05
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_base_patch4_window12_384_22kto1k-d59b0d1d.pth
+ Converted From:
+ Weights: https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22kto1k.pth
+ Code: https://github.com/microsoft/Swin-Transformer/blob/777f6c66604bb5579086c4447efe3620344d95a9/models/swin_transformer.py#L458
+ Config: configs/swin_transformer/swin-base_16xb64_in1k-384px.py
+ - Name: swin-large_in21k-pre-3rdparty_in1k
+ Metadata:
+ FLOPs: 34040000000
+ Parameters: 196530000
+ In Collection: Swin-Transformer
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 86.24
+ Top 5 Accuracy: 97.88
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_large_patch4_window7_224_22kto1k-5f0996db.pth
+ Converted From:
+ Weights: https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22kto1k.pth
+ Code: https://github.com/microsoft/Swin-Transformer/blob/777f6c66604bb5579086c4447efe3620344d95a9/models/swin_transformer.py#L458
+ Config: configs/swin_transformer/swin-large_16xb64_in1k.py
+ - Name: swin-large_in21k-pre-3rdparty_in1k-384
+ Metadata:
+ FLOPs: 100040000000
+ Parameters: 196740000
+ In Collection: Swin-Transformer
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 87.25
+ Top 5 Accuracy: 98.25
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_large_patch4_window12_384_22kto1k-0a40944b.pth
+ Converted From:
+ Weights: https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22kto1k.pth
+ Code: https://github.com/microsoft/Swin-Transformer/blob/777f6c66604bb5579086c4447efe3620344d95a9/models/swin_transformer.py#L458
+ Config: configs/swin_transformer/swin-large_16xb64_in1k-384px.py
+ - Name: swin-large_8xb8_cub_384px
+ Metadata:
+ FLOPs: 100040000000
+ Parameters: 195510000
+ In Collection: Swin-Transformer
+ Results:
+ - Dataset: CUB-200-2011
+ Metrics:
+ Top 1 Accuracy: 91.87
+ Task: Image Classification
+ Pretrain: https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin-large_3rdparty_in21k-384px.pth
+ Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin-large_8xb8_cub_384px_20220307-1bbaee6a.pth
+ Config: configs/swin_transformer/swin-large_8xb8_cub_384px.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin-base_16xb64_in1k-384px.py b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin-base_16xb64_in1k-384px.py
new file mode 100644
index 00000000..711a0d6d
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin-base_16xb64_in1k-384px.py
@@ -0,0 +1,7 @@
+# Only for evaluation
+_base_ = [
+ '../_base_/models/swin_transformer/base_384.py',
+ '../_base_/datasets/imagenet_bs64_swin_384.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py'
+]
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin-base_16xb64_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin-base_16xb64_in1k.py
new file mode 100644
index 00000000..2a4548af
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin-base_16xb64_in1k.py
@@ -0,0 +1,6 @@
+_base_ = [
+ '../_base_/models/swin_transformer/base_224.py',
+ '../_base_/datasets/imagenet_bs64_swin_224.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py'
+]
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin-large_16xb64_in1k-384px.py b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin-large_16xb64_in1k-384px.py
new file mode 100644
index 00000000..a7f0ad27
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin-large_16xb64_in1k-384px.py
@@ -0,0 +1,7 @@
+# Only for evaluation
+_base_ = [
+ '../_base_/models/swin_transformer/large_384.py',
+ '../_base_/datasets/imagenet_bs64_swin_384.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py'
+]
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin-large_16xb64_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin-large_16xb64_in1k.py
new file mode 100644
index 00000000..4e875c59
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin-large_16xb64_in1k.py
@@ -0,0 +1,7 @@
+# Only for evaluation
+_base_ = [
+ '../_base_/models/swin_transformer/large_224.py',
+ '../_base_/datasets/imagenet_bs64_swin_224.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py'
+]
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin-large_8xb8_cub_384px.py b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin-large_8xb8_cub_384px.py
new file mode 100644
index 00000000..d1137161
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin-large_8xb8_cub_384px.py
@@ -0,0 +1,37 @@
+_base_ = [
+ '../_base_/models/swin_transformer/large_384.py',
+ '../_base_/datasets/cub_bs8_384.py', '../_base_/schedules/cub_bs64.py',
+ '../_base_/default_runtime.py'
+]
+
+# model settings
+checkpoint = 'https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin-large_3rdparty_in21k-384px.pth' # noqa
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ init_cfg=dict(
+ type='Pretrained', checkpoint=checkpoint, prefix='backbone')),
+ head=dict(num_classes=200, ))
+
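+# exclude norm layers, biases and the position-bias tables below from
+# weight decay, a common recipe when fine-tuning transformer backbones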
+paramwise_cfg = dict(
+ norm_decay_mult=0.0,
+ bias_decay_mult=0.0,
+ custom_keys={
+ '.absolute_pos_embed': dict(decay_mult=0.0),
+ '.relative_position_bias_table': dict(decay_mult=0.0)
+ })
+
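+# `_delete_=True` replaces the optimizer settings inherited from the base
+# schedule (cub_bs64.py) instead of merging these keys into them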
+optimizer = dict(
+ _delete_=True,
+ type='AdamW',
+ lr=5e-6,
+ weight_decay=0.0005,
+ eps=1e-8,
+ betas=(0.9, 0.999),
+ paramwise_cfg=paramwise_cfg)
+optimizer_config = dict(grad_clip=dict(max_norm=5.0), _delete_=True)
+
+log_config = dict(interval=20)  # log every 20 iterations
+
+checkpoint_config = dict(
+ interval=1, max_keep_ckpts=3) # save last three checkpoints
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin-small_16xb64_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin-small_16xb64_in1k.py
new file mode 100644
index 00000000..aa1fa21b
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin-small_16xb64_in1k.py
@@ -0,0 +1,6 @@
+_base_ = [
+ '../_base_/models/swin_transformer/small_224.py',
+ '../_base_/datasets/imagenet_bs64_swin_224.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py'
+]
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin-tiny_16xb64_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin-tiny_16xb64_in1k.py
new file mode 100644
index 00000000..e1ed022a
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin-tiny_16xb64_in1k.py
@@ -0,0 +1,6 @@
+_base_ = [
+ '../_base_/models/swin_transformer/tiny_224.py',
+ '../_base_/datasets/imagenet_bs64_swin_224.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py'
+]
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin_base_224_b16x64_300e_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin_base_224_b16x64_300e_imagenet.py
new file mode 100644
index 00000000..912c379b
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin_base_224_b16x64_300e_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'swin-base_16xb64_in1k.py'
+
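+# legacy alias: loading this config emits a deprecation warning pointing to
+# the file named in `expected`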
+_deprecation_ = dict(
+ expected='swin-base_16xb64_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin_base_384_evalonly_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin_base_384_evalonly_imagenet.py
new file mode 100644
index 00000000..9ed58889
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin_base_384_evalonly_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'swin-base_16xb64_in1k-384px.py'
+
+_deprecation_ = dict(
+ expected='swin-base_16xb64_in1k-384px.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin_large_224_evalonly_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin_large_224_evalonly_imagenet.py
new file mode 100644
index 00000000..5ebb54a5
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin_large_224_evalonly_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'swin-large_16xb64_in1k.py'
+
+_deprecation_ = dict(
+ expected='swin-large_16xb64_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin_large_384_evalonly_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin_large_384_evalonly_imagenet.py
new file mode 100644
index 00000000..9a59f5b6
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin_large_384_evalonly_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'swin-large_16xb64_in1k-384px.py'
+
+_deprecation_ = dict(
+ expected='swin-large_16xb64_in1k-384px.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin_small_224_b16x64_300e_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin_small_224_b16x64_300e_imagenet.py
new file mode 100644
index 00000000..a747aa4d
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin_small_224_b16x64_300e_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'swin-small_16xb64_in1k.py'
+
+_deprecation_ = dict(
+ expected='swin-small_16xb64_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin_tiny_224_b16x64_300e_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin_tiny_224_b16x64_300e_imagenet.py
new file mode 100644
index 00000000..2160eb91
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer/swin_tiny_224_b16x64_300e_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'swin-tiny_16xb64_in1k.py'
+
+_deprecation_ = dict(
+ expected='swin-tiny_16xb64_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/README.md b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/README.md
new file mode 100644
index 00000000..31d1aff5
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/README.md
@@ -0,0 +1,58 @@
+# Swin Transformer V2
+
+> [Swin Transformer V2: Scaling Up Capacity and Resolution](https://arxiv.org/abs/2111.09883)
+
+
+
+## Abstract
+
+Large-scale NLP models have been shown to significantly improve the performance on language tasks with no signs of saturation. They also demonstrate amazing few-shot capabilities like that of human beings. This paper aims to explore large-scale models in computer vision. We tackle three major issues in training and application of large vision models, including training instability, resolution gaps between pre-training and fine-tuning, and hunger on labelled data. Three main techniques are proposed: 1) a residual-post-norm method combined with cosine attention to improve training stability; 2) A log-spaced continuous position bias method to effectively transfer models pre-trained using low-resolution images to downstream tasks with high-resolution inputs; 3) A self-supervised pre-training method, SimMIM, to reduce the needs of vast labeled images. Through these techniques, this paper successfully trained a 3 billion-parameter Swin Transformer V2 model, which is the largest dense vision model to date, and makes it capable of training with images of up to 1,536×1,536 resolution. It set new performance records on 4 representative vision tasks, including ImageNet-V2 image classification, COCO object detection, ADE20K semantic segmentation, and Kinetics-400 video action classification. Also note our training is much more efficient than that in Google's billion-level visual models, which consumes 40 times less labelled data and 40 times less training time.
+
+
+
+
+
+
+## Results and models
+
+### ImageNet-21k
+
+The models pre-trained on ImageNet-21k are only used for fine-tuning and therefore have no evaluation results; a fine-tuning sketch follows the table below.
+
+| Model | resolution | Params(M) | Flops(G) | Download |
+| :------: | :--------: | :-------: | :------: | :--------------------------------------------------------------------------------------------------------------------------------------: |
+| Swin-B\* | 192x192 | 87.92 | 8.51 | [model](https://download.openmmlab.com/mmclassification/v0/swin-v2/pretrain/swinv2-base-w12_3rdparty_in21k-192px_20220803-f7dc9763.pth) |
+| Swin-L\* | 192x192 | 196.74 | 19.04 | [model](https://download.openmmlab.com/mmclassification/v0/swin-v2/pretrain/swinv2-large-w12_3rdparty_in21k-192px_20220803-d9073fee.pth) |
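+
+These checkpoints are meant to be loaded as backbone initialization. The fragment below is a hypothetical sketch of how that could look in a fine-tuning config; the class count is a placeholder:
+
+```python
+# load the ImageNet-21k pre-trained backbone from the table above
+checkpoint = 'https://download.openmmlab.com/mmclassification/v0/swin-v2/pretrain/swinv2-base-w12_3rdparty_in21k-192px_20220803-f7dc9763.pth'
+model = dict(
+    backbone=dict(
+        init_cfg=dict(
+            type='Pretrained', checkpoint=checkpoint, prefix='backbone')),
+    head=dict(num_classes=100))  # placeholder class count for the target dataset
+```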
+
+### ImageNet-1k
+
+| Model | Pretrain | resolution | window | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+| :------: | :----------: | :--------: | :----: | :-------: | :------: | :-------: | :-------: | :-------------------------------------------------------------: | :----------------------------------------------------------------: |
+| Swin-T\* | From scratch | 256x256 | 8x8 | 28.35 | 4.35 | 81.76 | 95.87 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer_v2/swinv2-tiny-w8_16xb64_in1k-256px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-tiny-w8_3rdparty_in1k-256px_20220803-e318968f.pth) |
+| Swin-T\* | From scratch | 256x256 | 16x16 | 28.35 | 4.4 | 82.81 | 96.23 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer_v2/swinv2-tiny-w16_16xb64_in1k-256px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-tiny-w16_3rdparty_in1k-256px_20220803-9651cdd7.pth) |
+| Swin-S\* | From scratch | 256x256 | 8x8 | 49.73 | 8.45 | 83.74 | 96.6 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer_v2/swinv2-small-w8_16xb64_in1k-256px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-small-w8_3rdparty_in1k-256px_20220803-b01a4332.pth) |
+| Swin-S\* | From scratch | 256x256 | 16x16 | 49.73 | 8.57 | 84.13 | 96.83 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer_v2/swinv2-small-w16_16xb64_in1k-256px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-small-w16_3rdparty_in1k-256px_20220803-b707d206.pth) |
+| Swin-B\* | From scratch | 256x256 | 8x8 | 87.92 | 14.99 | 84.2 | 96.86 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer_v2/swinv2-base-w8_16xb64_in1k-256px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-base-w8_3rdparty_in1k-256px_20220803-8ff28f2b.pth) |
+| Swin-B\* | From scratch | 256x256 | 16x16 | 87.92 | 15.14 | 84.6 | 97.05 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer_v2/swinv2-base-w16_16xb64_in1k-256px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-base-w16_3rdparty_in1k-256px_20220803-5a1886b7.pth) |
+| Swin-B\* | ImageNet-21k | 256x256 | 16x16 | 87.92 | 15.14 | 86.17 | 97.88 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer_v2/swinv2-base-w16_in21k-pre_16xb64_in1k-256px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-base-w16_in21k-pre_3rdparty_in1k-256px_20220803-8d7aa8ad.pth) |
+| Swin-B\* | ImageNet-21k | 384x384 | 24x24 | 87.92 | 34.07 | 87.14 | 98.23 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer_v2/swinv2-base-w24_in21k-pre_16xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-base-w24_in21k-pre_3rdparty_in1k-384px_20220803-44eb70f8.pth) |
+| Swin-L\* | ImageNet-21k | 256x256 | 16x16 | 196.75 | 33.86 | 86.93 | 98.06 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer_v2/swinv2-large-w16_in21k-pre_16xb64_in1k-256px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-large-w16_in21k-pre_3rdparty_in1k-256px_20220803-c40cbed7.pth) |
+| Swin-L\* | ImageNet-21k | 384x384 | 24x24 | 196.75 | 76.2 | 87.59 | 98.27 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer_v2/swinv2-large-w24_in21k-pre_16xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-large-w24_in21k-pre_3rdparty_in1k-384px_20220803-3b36c165.pth) |
+
+*Models with * are converted from the [official repo](https://github.com/microsoft/Swin-Transformer#main-results-on-imagenet-with-pretrained-models). The config files of these models are for validation only; we cannot guarantee their training accuracy and welcome contributions of reproduction results.*
+
+*The ImageNet-21k pre-trained models at input resolutions of 256x256 and 384x384 are both fine-tuned from the same checkpoint, which was pre-trained at the smaller resolution of 192x192.*
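+
+In the configs, this resolution transfer is expressed by enlarging the attention windows while recording the pre-training windows, as in this sketch mirroring the 256x256 configs in this directory:
+
+```python
+model = dict(
+    backbone=dict(
+        window_size=[16, 16, 16, 8],               # windows at 256x256
+        pretrained_window_sizes=[12, 12, 12, 6]))  # windows at 192x192
+```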
+
+## Citation
+
+```
+@article{https://doi.org/10.48550/arxiv.2111.09883,
+ doi = {10.48550/ARXIV.2111.09883},
+ url = {https://arxiv.org/abs/2111.09883},
+ author = {Liu, Ze and Hu, Han and Lin, Yutong and Yao, Zhuliang and Xie, Zhenda and Wei, Yixuan and Ning, Jia and Cao, Yue and Zhang, Zheng and Dong, Li and Wei, Furu and Guo, Baining},
+ keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences},
+ title = {Swin Transformer V2: Scaling Up Capacity and Resolution},
+ publisher = {arXiv},
+ year = {2021},
+ copyright = {Creative Commons Attribution 4.0 International}
+}
+```
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/metafile.yml b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/metafile.yml
new file mode 100644
index 00000000..cef83923
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/metafile.yml
@@ -0,0 +1,204 @@
+Collections:
+ - Name: Swin-Transformer-V2
+ Metadata:
+ Training Data: ImageNet-1k
+ Training Techniques:
+ - AdamW
+ - Weight Decay
+ Training Resources: 16x V100 GPUs
+ Epochs: 300
+ Batch Size: 1024
+ Architecture:
+ - Shift Window Multihead Self Attention
+ Paper:
+ URL: https://arxiv.org/abs/2111.09883
+ Title: "Swin Transformer V2: Scaling Up Capacity and Resolution"
+ README: configs/swin_transformer_v2/README.md
+
+Models:
+ - Name: swinv2-tiny-w8_3rdparty_in1k-256px
+ Metadata:
+ FLOPs: 4350000000
+ Parameters: 28350000
+ In Collection: Swin-Transformer-V2
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 81.76
+ Top 5 Accuracy: 95.87
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-tiny-w8_3rdparty_in1k-256px_20220803-e318968f.pth
+ Config: configs/swin_transformer_v2/swinv2-tiny-w8_16xb64_in1k-256px.py
+ Converted From:
+ Weights: https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_tiny_patch4_window8_256.pth
+ Code: https://github.com/microsoft/Swin-Transformer
+ - Name: swinv2-tiny-w16_3rdparty_in1k-256px
+ Metadata:
+ FLOPs: 4400000000
+ Parameters: 28350000
+ In Collection: Swin-Transformer-V2
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 82.81
+ Top 5 Accuracy: 96.23
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-tiny-w16_3rdparty_in1k-256px_20220803-9651cdd7.pth
+ Config: configs/swin_transformer_v2/swinv2-tiny-w16_16xb64_in1k-256px.py
+ Converted From:
+ Weights: https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_tiny_patch4_window16_256.pth
+ Code: https://github.com/microsoft/Swin-Transformer
+ - Name: swinv2-small-w8_3rdparty_in1k-256px
+ Metadata:
+ FLOPs: 8450000000
+ Parameters: 49730000
+ In Collection: Swin-Transformer-V2
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 83.74
+ Top 5 Accuracy: 96.6
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-small-w8_3rdparty_in1k-256px_20220803-b01a4332.pth
+ Config: configs/swin_transformer_v2/swinv2-small-w8_16xb64_in1k-256px.py
+ Converted From:
+ Weights: https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_small_patch4_window8_256.pth
+ Code: https://github.com/microsoft/Swin-Transformer
+ - Name: swinv2-small-w16_3rdparty_in1k-256px
+ Metadata:
+ FLOPs: 8570000000
+ Parameters: 49730000
+ In Collection: Swin-Transformer-V2
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 84.13
+ Top 5 Accuracy: 96.83
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-small-w16_3rdparty_in1k-256px_20220803-b707d206.pth
+ Config: configs/swin_transformer_v2/swinv2-small-w16_16xb64_in1k-256px.py
+ Converted From:
+ Weights: https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_small_patch4_window16_256.pth
+ Code: https://github.com/microsoft/Swin-Transformer
+ - Name: swinv2-base-w8_3rdparty_in1k-256px
+ Metadata:
+ FLOPs: 14990000000
+ Parameters: 87920000
+ In Collection: Swin-Transformer-V2
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 84.2
+ Top 5 Accuracy: 96.86
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-base-w8_3rdparty_in1k-256px_20220803-8ff28f2b.pth
+ Config: configs/swin_transformer_v2/swinv2-base-w8_16xb64_in1k-256px.py
+ Converted From:
+ Weights: https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window8_256.pth
+ Code: https://github.com/microsoft/Swin-Transformer
+ - Name: swinv2-base-w16_3rdparty_in1k-256px
+ Metadata:
+ FLOPs: 15140000000
+ Parameters: 87920000
+ In Collection: Swin-Transformer-V2
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 84.6
+ Top 5 Accuracy: 97.05
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-base-w16_3rdparty_in1k-256px_20220803-5a1886b7.pth
+ Config: configs/swin_transformer_v2/swinv2-base-w16_16xb64_in1k-256px.py
+ Converted From:
+ Weights: https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window16_256.pth
+ Code: https://github.com/microsoft/Swin-Transformer
+ - Name: swinv2-base-w16_in21k-pre_3rdparty_in1k-256px
+ Metadata:
+ Training Data: ImageNet-21k
+ FLOPs: 15140000000
+ Parameters: 87920000
+ In Collection: Swin-Transformer-V2
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 86.17
+ Top 5 Accuracy: 97.88
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-base-w16_in21k-pre_3rdparty_in1k-256px_20220803-8d7aa8ad.pth
+ Config: configs/swin_transformer_v2/swinv2-base-w16_in21k-pre_16xb64_in1k-256px.py
+ Converted From:
+ Weights: https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window12to16_192to256_22kto1k_ft.pth
+ Code: https://github.com/microsoft/Swin-Transformer
+ - Name: swinv2-base-w24_in21k-pre_3rdparty_in1k-384px
+ Metadata:
+ Training Data: ImageNet-21k
+ FLOPs: 34070000000
+ Parameters: 87920000
+ In Collection: Swin-Transformer-V2
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 87.14
+ Top 5 Accuracy: 98.23
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-base-w24_in21k-pre_3rdparty_in1k-384px_20220803-44eb70f8.pth
+ Config: configs/swin_transformer_v2/swinv2-base-w24_in21k-pre_16xb64_in1k-384px.py
+ Converted From:
+ Weights: https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window12to24_192to384_22kto1k_ft.pth
+ Code: https://github.com/microsoft/Swin-Transformer
+ - Name: swinv2-large-w16_in21k-pre_3rdparty_in1k-256px
+ Metadata:
+ Training Data: ImageNet-21k
+ FLOPs: 33860000000
+ Parameters: 196750000
+ In Collection: Swin-Transformer-V2
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 86.93
+ Top 5 Accuracy: 98.06
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-large-w16_in21k-pre_3rdparty_in1k-256px_20220803-c40cbed7.pth
+ Config: configs/swin_transformer_v2/swinv2-large-w16_in21k-pre_16xb64_in1k-256px.py
+ Converted From:
+ Weights: https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_large_patch4_window12to16_192to256_22kto1k_ft.pth
+ Code: https://github.com/microsoft/Swin-Transformer
+ - Name: swinv2-large-w24_in21k-pre_3rdparty_in1k-384px
+ Metadata:
+ Training Data: ImageNet-21k
+ FLOPs: 76200000000
+ Parameters: 196750000
+ In Collection: Swin-Transformer-V2
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 87.59
+ Top 5 Accuracy: 98.27
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-large-w24_in21k-pre_3rdparty_in1k-384px_20220803-3b36c165.pth
+ Config: configs/swin_transformer_v2/swinv2-large-w24_in21k-pre_16xb64_in1k-384px.py
+ Converted From:
+ Weights: https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_large_patch4_window12to24_192to384_22kto1k_ft.pth
+ Code: https://github.com/microsoft/Swin-Transformer
+ - Name: swinv2-base-w12_3rdparty_in21k-192px
+ Metadata:
+ Training Data: ImageNet-21k
+ FLOPs: 8510000000
+ Parameters: 87920000
+ In Collection: Swin-Transformer-V2
+ Results: null
+ Weights: https://download.openmmlab.com/mmclassification/v0/swin-v2/pretrain/swinv2-base-w12_3rdparty_in21k-192px_20220803-f7dc9763.pth
+ Converted From:
+ Weights: https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window12_192_22k.pth
+ Code: https://github.com/microsoft/Swin-Transformer
+ - Name: swinv2-large-w12_3rdparty_in21k-192px
+ Metadata:
+ Training Data: ImageNet-21k
+ FLOPs: 19040000000
+ Parameters: 196740000
+ In Collection: Swin-Transformer-V2
+ Results: null
+ Weights: https://download.openmmlab.com/mmclassification/v0/swin-v2/pretrain/swinv2-large-w12_3rdparty_in21k-192px_20220803-d9073fee.pth
+ Converted From:
+ Weights: https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_large_patch4_window12_192_22k.pth
+ Code: https://github.com/microsoft/Swin-Transformer
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/swinv2-base-w16_16xb64_in1k-256px.py b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/swinv2-base-w16_16xb64_in1k-256px.py
new file mode 100644
index 00000000..5f375ee1
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/swinv2-base-w16_16xb64_in1k-256px.py
@@ -0,0 +1,8 @@
+_base_ = [
+ '../_base_/models/swin_transformer_v2/base_256.py',
+ '../_base_/datasets/imagenet_bs64_swin_256.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py'
+]
+
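+# at 256x256 input the last stage's feature map is 8x8, so its window is
+# capped at 8 while the earlier stages use 16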
+model = dict(backbone=dict(window_size=[16, 16, 16, 8]))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/swinv2-base-w16_in21k-pre_16xb64_in1k-256px.py b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/swinv2-base-w16_in21k-pre_16xb64_in1k-256px.py
new file mode 100644
index 00000000..0725f9e7
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/swinv2-base-w16_in21k-pre_16xb64_in1k-256px.py
@@ -0,0 +1,13 @@
+_base_ = [
+ '../_base_/models/swin_transformer_v2/base_256.py',
+ '../_base_/datasets/imagenet_bs64_swin_256.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py'
+]
+
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ window_size=[16, 16, 16, 8],
+ drop_path_rate=0.2,
+ pretrained_window_sizes=[12, 12, 12, 6]))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/swinv2-base-w24_in21k-pre_16xb64_in1k-384px.py b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/swinv2-base-w24_in21k-pre_16xb64_in1k-384px.py
new file mode 100644
index 00000000..3dd4e5fd
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/swinv2-base-w24_in21k-pre_16xb64_in1k-384px.py
@@ -0,0 +1,14 @@
+_base_ = [
+ '../_base_/models/swin_transformer_v2/base_384.py',
+ '../_base_/datasets/imagenet_bs64_swin_384.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py'
+]
+
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ img_size=384,
+ window_size=[24, 24, 24, 12],
+ drop_path_rate=0.2,
+ pretrained_window_sizes=[12, 12, 12, 6]))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/swinv2-base-w8_16xb64_in1k-256px.py b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/swinv2-base-w8_16xb64_in1k-256px.py
new file mode 100644
index 00000000..23fc4070
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/swinv2-base-w8_16xb64_in1k-256px.py
@@ -0,0 +1,6 @@
+_base_ = [
+ '../_base_/models/swin_transformer_v2/base_256.py',
+ '../_base_/datasets/imagenet_bs64_swin_256.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py'
+]
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/swinv2-large-w16_in21k-pre_16xb64_in1k-256px.py b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/swinv2-large-w16_in21k-pre_16xb64_in1k-256px.py
new file mode 100644
index 00000000..62a2a29b
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/swinv2-large-w16_in21k-pre_16xb64_in1k-256px.py
@@ -0,0 +1,13 @@
+# Only for evaluation
+_base_ = [
+ '../_base_/models/swin_transformer_v2/large_256.py',
+ '../_base_/datasets/imagenet_bs64_swin_256.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py'
+]
+
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ window_size=[16, 16, 16, 8], pretrained_window_sizes=[12, 12, 12, 6]),
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/swinv2-large-w24_in21k-pre_16xb64_in1k-384px.py b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/swinv2-large-w24_in21k-pre_16xb64_in1k-384px.py
new file mode 100644
index 00000000..d97d9b2b
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/swinv2-large-w24_in21k-pre_16xb64_in1k-384px.py
@@ -0,0 +1,15 @@
+# Only for evaluation
+_base_ = [
+ '../_base_/models/swin_transformer_v2/large_384.py',
+ '../_base_/datasets/imagenet_bs64_swin_384.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py'
+]
+
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ img_size=384,
+ window_size=[24, 24, 24, 12],
+ pretrained_window_sizes=[12, 12, 12, 6]),
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/swinv2-small-w16_16xb64_in1k-256px.py b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/swinv2-small-w16_16xb64_in1k-256px.py
new file mode 100644
index 00000000..f87265dd
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/swinv2-small-w16_16xb64_in1k-256px.py
@@ -0,0 +1,8 @@
+_base_ = [
+ '../_base_/models/swin_transformer_v2/small_256.py',
+ '../_base_/datasets/imagenet_bs64_swin_256.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py'
+]
+
+model = dict(backbone=dict(window_size=[16, 16, 16, 8]))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/swinv2-small-w8_16xb64_in1k-256px.py b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/swinv2-small-w8_16xb64_in1k-256px.py
new file mode 100644
index 00000000..f1001f1b
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/swinv2-small-w8_16xb64_in1k-256px.py
@@ -0,0 +1,6 @@
+_base_ = [
+ '../_base_/models/swin_transformer_v2/small_256.py',
+ '../_base_/datasets/imagenet_bs64_swin_256.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py'
+]
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/swinv2-tiny-w16_16xb64_in1k-256px.py b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/swinv2-tiny-w16_16xb64_in1k-256px.py
new file mode 100644
index 00000000..7e1f290f
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/swinv2-tiny-w16_16xb64_in1k-256px.py
@@ -0,0 +1,8 @@
+_base_ = [
+ '../_base_/models/swin_transformer_v2/tiny_256.py',
+ '../_base_/datasets/imagenet_bs64_swin_256.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py'
+]
+
+model = dict(backbone=dict(window_size=[16, 16, 16, 8]))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/swinv2-tiny-w8_16xb64_in1k-256px.py b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/swinv2-tiny-w8_16xb64_in1k-256px.py
new file mode 100644
index 00000000..2cdc9a25
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/swin_transformer_v2/swinv2-tiny-w8_16xb64_in1k-256px.py
@@ -0,0 +1,6 @@
+_base_ = [
+ '../_base_/models/swin_transformer_v2/tiny_256.py',
+ '../_base_/datasets/imagenet_bs64_swin_256.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py'
+]
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/t2t_vit/README.md b/openmmlab_test/mmclassification-0.24.1/configs/t2t_vit/README.md
new file mode 100644
index 00000000..1e3a0827
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/t2t_vit/README.md
@@ -0,0 +1,36 @@
+# Tokens-to-Token ViT
+
+> [Tokens-to-Token ViT: Training Vision Transformers from Scratch on ImageNet](https://arxiv.org/abs/2101.11986)
+
+
+
+## Abstract
+
+Transformers, which are popular for language modeling, have been explored for solving vision tasks recently, e.g., the Vision Transformer (ViT) for image classification. The ViT model splits each image into a sequence of tokens with fixed length and then applies multiple Transformer layers to model their global relation for classification. However, ViT achieves inferior performance to CNNs when trained from scratch on a midsize dataset like ImageNet. We find it is because: 1) the simple tokenization of input images fails to model the important local structure such as edges and lines among neighboring pixels, leading to low training sample efficiency; 2) the redundant attention backbone design of ViT leads to limited feature richness for fixed computation budgets and limited training samples. To overcome such limitations, we propose a new Tokens-To-Token Vision Transformer (T2T-ViT), which incorporates 1) a layer-wise Tokens-to-Token (T2T) transformation to progressively structurize the image to tokens by recursively aggregating neighboring Tokens into one Token (Tokens-to-Token), such that local structure represented by surrounding tokens can be modeled and tokens length can be reduced; 2) an efficient backbone with a deep-narrow structure for vision transformer motivated by CNN architecture design after empirical study. Notably, T2T-ViT reduces the parameter count and MACs of vanilla ViT by half, while achieving more than 3.0% improvement when trained from scratch on ImageNet. It also outperforms ResNets and achieves comparable performance with MobileNets by directly training on ImageNet. For example, T2T-ViT with comparable size to ResNet50 (21.5M parameters) can achieve 83.3% top1 accuracy in image resolution 384×384 on ImageNet.
+
+
+
+
+
+
+## Results and models
+
+### ImageNet-1k
+
+| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+| :----------: | :-------: | :------: | :-------: | :-------: | :-------------------------------------------------------------------------: | :----------------------------------------------------------------------------: |
+| T2T-ViT_t-14 | 21.47 | 4.34 | 81.83 | 95.84 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-14_8xb64_in1k_20211220-f7378dd5.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-14_8xb64_in1k_20211220-f7378dd5.log.json) |
+| T2T-ViT_t-19 | 39.08 | 7.80 | 82.63 | 96.18 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/t2t_vit/t2t-vit-t-19_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-19_8xb64_in1k_20211214-7f5e3aaf.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-19_8xb64_in1k_20211214-7f5e3aaf.log.json) |
+| T2T-ViT_t-24 | 64.00 | 12.69 | 82.71 | 96.09 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/t2t_vit/t2t-vit-t-24_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-24_8xb64_in1k_20211214-b2a68ae3.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-24_8xb64_in1k_20211214-b2a68ae3.log.json) |
+
+*Consistent with the [official repo](https://github.com/yitu-opensource/T2T-ViT), we adopt the best checkpoints during training.*
+
+## Citation
+
+```
+@article{yuan2021tokens,
+ title={Tokens-to-token vit: Training vision transformers from scratch on imagenet},
+ author={Yuan, Li and Chen, Yunpeng and Wang, Tao and Yu, Weihao and Shi, Yujun and Tay, Francis EH and Feng, Jiashi and Yan, Shuicheng},
+ journal={arXiv preprint arXiv:2101.11986},
+ year={2021}
+}
+```
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/t2t_vit/metafile.yml b/openmmlab_test/mmclassification-0.24.1/configs/t2t_vit/metafile.yml
new file mode 100644
index 00000000..f2125426
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/t2t_vit/metafile.yml
@@ -0,0 +1,58 @@
+Collections:
+ - Name: Tokens-to-Token ViT
+ Metadata:
+ Training Data: ImageNet-1k
+ Architecture:
+ - Layer Normalization
+ - Scaled Dot-Product Attention
+ - Attention Dropout
+ - Dropout
+ - Tokens to Token
+ Paper:
+ URL: https://arxiv.org/abs/2101.11986
+ Title: "Tokens-to-Token ViT: Training Vision Transformers from Scratch on ImageNet"
+ README: configs/t2t_vit/README.md
+ Code:
+ URL: https://github.com/open-mmlab/mmclassification/blob/v0.17.0/mmcls/models/backbones/t2t_vit.py
+ Version: v0.17.0
+
+Models:
+ - Name: t2t-vit-t-14_8xb64_in1k
+ Metadata:
+ FLOPs: 4340000000
+ Parameters: 21470000
+ In Collection: Tokens-to-Token ViT
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 81.83
+ Top 5 Accuracy: 95.84
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-14_8xb64_in1k_20211220-f7378dd5.pth
+ Config: configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py
+ - Name: t2t-vit-t-19_8xb64_in1k
+ Metadata:
+ FLOPs: 7800000000
+ Parameters: 39080000
+ In Collection: Tokens-to-Token ViT
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 82.63
+ Top 5 Accuracy: 96.18
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-19_8xb64_in1k_20211214-7f5e3aaf.pth
+ Config: configs/t2t_vit/t2t-vit-t-19_8xb64_in1k.py
+ - Name: t2t-vit-t-24_8xb64_in1k
+ Metadata:
+ FLOPs: 12690000000
+ Parameters: 64000000
+ In Collection: Tokens-to-Token ViT
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 82.71
+ Top 5 Accuracy: 96.09
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-24_8xb64_in1k_20211214-b2a68ae3.pth
+ Config: configs/t2t_vit/t2t-vit-t-24_8xb64_in1k.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py
new file mode 100644
index 00000000..a391df48
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py
@@ -0,0 +1,35 @@
+_base_ = [
+ '../_base_/models/t2t-vit-t-14.py',
+ '../_base_/datasets/imagenet_bs64_t2t_224.py',
+ '../_base_/default_runtime.py',
+]
+
+# optimizer
+paramwise_cfg = dict(
+ norm_decay_mult=0.0,
+ bias_decay_mult=0.0,
+ custom_keys={'cls_token': dict(decay_mult=0.0)},
+)
+optimizer = dict(
+ type='AdamW',
+ lr=5e-4,
+ weight_decay=0.05,
+ paramwise_cfg=paramwise_cfg,
+)
+optimizer_config = dict(grad_clip=None)
+
+# learning policy
+# FIXME: the lr in the first 300 epochs follows the CosineAnnealing policy,
+# and the lr in the last 10 epochs stays at min_lr
+lr_config = dict(
+ policy='CosineAnnealingCooldown',
+ min_lr=1e-5,
+ cool_down_time=10,
+ cool_down_ratio=0.1,
+ by_epoch=True,
+ warmup_by_epoch=True,
+ warmup='linear',
+ warmup_iters=10,
+ warmup_ratio=1e-6)
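+# EMAHook keeps an exponential moving average of the model weights;
+# `momentum` is the per-iteration EMA update rate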
+custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')]
+runner = dict(type='EpochBasedRunner', max_epochs=310)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/t2t_vit/t2t-vit-t-19_8xb64_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/t2t_vit/t2t-vit-t-19_8xb64_in1k.py
new file mode 100644
index 00000000..e1157f89
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/t2t_vit/t2t-vit-t-19_8xb64_in1k.py
@@ -0,0 +1,35 @@
+_base_ = [
+ '../_base_/models/t2t-vit-t-19.py',
+ '../_base_/datasets/imagenet_bs64_t2t_224.py',
+ '../_base_/default_runtime.py',
+]
+
+# optimizer
+paramwise_cfg = dict(
+ norm_decay_mult=0.0,
+ bias_decay_mult=0.0,
+ custom_keys={'cls_token': dict(decay_mult=0.0)},
+)
+optimizer = dict(
+ type='AdamW',
+ lr=5e-4,
+ weight_decay=0.065,
+ paramwise_cfg=paramwise_cfg,
+)
+optimizer_config = dict(grad_clip=None)
+
+# learning policy
+# FIXME: the lr in the first 300 epochs follows the CosineAnnealing policy,
+# and the lr in the last 10 epochs stays at min_lr
+lr_config = dict(
+ policy='CosineAnnealingCooldown',
+ min_lr=1e-5,
+ cool_down_time=10,
+ cool_down_ratio=0.1,
+ by_epoch=True,
+ warmup_by_epoch=True,
+ warmup='linear',
+ warmup_iters=10,
+ warmup_ratio=1e-6)
+custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')]
+runner = dict(type='EpochBasedRunner', max_epochs=310)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/t2t_vit/t2t-vit-t-24_8xb64_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/t2t_vit/t2t-vit-t-24_8xb64_in1k.py
new file mode 100644
index 00000000..815f2f15
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/t2t_vit/t2t-vit-t-24_8xb64_in1k.py
@@ -0,0 +1,35 @@
+_base_ = [
+ '../_base_/models/t2t-vit-t-24.py',
+ '../_base_/datasets/imagenet_bs64_t2t_224.py',
+ '../_base_/default_runtime.py',
+]
+
+# optimizer
+paramwise_cfg = dict(
+ norm_decay_mult=0.0,
+ bias_decay_mult=0.0,
+ custom_keys={'cls_token': dict(decay_mult=0.0)},
+)
+optimizer = dict(
+ type='AdamW',
+ lr=5e-4,
+ weight_decay=0.065,
+ paramwise_cfg=paramwise_cfg,
+)
+optimizer_config = dict(grad_clip=None)
+
+# learning policy
+# FIXME: the lr in the first 300 epochs follows the CosineAnnealing policy,
+# and the lr in the last 10 epochs stays at min_lr
+lr_config = dict(
+ policy='CosineAnnealingCooldown',
+ min_lr=1e-5,
+ cool_down_time=10,
+ cool_down_ratio=0.1,
+ by_epoch=True,
+ warmup_by_epoch=True,
+ warmup='linear',
+ warmup_iters=10,
+ warmup_ratio=1e-6)
+custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')]
+runner = dict(type='EpochBasedRunner', max_epochs=310)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/tnt/README.md b/openmmlab_test/mmclassification-0.24.1/configs/tnt/README.md
new file mode 100644
index 00000000..948eef74
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/tnt/README.md
@@ -0,0 +1,36 @@
+# TNT
+
+> [Transformer in Transformer](https://arxiv.org/abs/2103.00112)
+
+
+
+## Abstract
+
+Transformer is a new kind of neural architecture which encodes the input data as powerful features via the attention mechanism. Basically, the visual transformers first divide the input images into several local patches and then calculate both representations and their relationship. Since natural images are of high complexity with abundant detail and color information, the granularity of the patch dividing is not fine enough for excavating features of objects in different scales and locations. In this paper, we point out that the attention inside these local patches are also essential for building visual transformers with high performance and we explore a new architecture, namely, Transformer iN Transformer (TNT). Specifically, we regard the local patches (e.g., 16×16) as "visual sentences" and present to further divide them into smaller patches (e.g., 4×4) as "visual words". The attention of each word will be calculated with other words in the given visual sentence with negligible computational costs. Features of both words and sentences will be aggregated to enhance the representation ability. Experiments on several benchmarks demonstrate the effectiveness of the proposed TNT architecture, e.g., we achieve an 81.5% top-1 accuracy on the ImageNet, which is about 1.7% higher than that of the state-of-the-art visual transformer with similar computational cost.
+
+
+
+
+
+
+## Results and models
+
+### ImageNet-1k
+
+| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+| :---------: | :-------: | :------: | :-------: | :-------: | :--------------------------------------------------------------------------: | :----------------------------------------------------------------------------: |
+| TNT-small\* | 23.76 | 3.36 | 81.52 | 95.73 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/tnt/tnt-s-p16_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/tnt/tnt-small-p16_3rdparty_in1k_20210903-c56ee7df.pth) |
+
+*Models with * are converted from [timm](https://github.com/rwightman/pytorch-image-models/). The config files of these models are for validation only; we cannot guarantee their training accuracy and welcome contributions of reproduction results.*
+
+## Citation
+
+```
+@misc{han2021transformer,
+ title={Transformer in Transformer},
+ author={Kai Han and An Xiao and Enhua Wu and Jianyuan Guo and Chunjing Xu and Yunhe Wang},
+ year={2021},
+ eprint={2103.00112},
+ archivePrefix={arXiv},
+ primaryClass={cs.CV}
+}
+```
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/tnt/metafile.yml b/openmmlab_test/mmclassification-0.24.1/configs/tnt/metafile.yml
new file mode 100644
index 00000000..67f3c782
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/tnt/metafile.yml
@@ -0,0 +1,29 @@
+Collections:
+ - Name: Transformer in Transformer
+ Metadata:
+ Training Data: ImageNet-1k
+ Paper:
+ URL: https://arxiv.org/abs/2103.00112
+ Title: "Transformer in Transformer"
+ README: configs/tnt/README.md
+ Code:
+ URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/tnt.py#L203
+ Version: v0.15.0
+
+Models:
+ - Name: tnt-small-p16_3rdparty_in1k
+ Metadata:
+ FLOPs: 3360000000
+ Parameters: 23760000
+ In Collection: Transformer in Transformer
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 81.52
+ Top 5 Accuracy: 95.73
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/tnt/tnt-small-p16_3rdparty_in1k_20210903-c56ee7df.pth
+ Config: configs/tnt/tnt-s-p16_16xb64_in1k.py
+ Converted From:
+ Weights: https://github.com/contrastive/pytorch-image-models/releases/download/TNT/tnt_s_patch16_224.pth.tar
+ Code: https://github.com/contrastive/pytorch-image-models/blob/809271b0f3e5d9be4e11c0c5cec1dbba8b5e2c60/timm/models/tnt.py#L144
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/tnt/tnt-s-p16_16xb64_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/tnt/tnt-s-p16_16xb64_in1k.py
new file mode 100644
index 00000000..36693689
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/tnt/tnt-s-p16_16xb64_in1k.py
@@ -0,0 +1,39 @@
+# accuracy_top-1 : 81.52 accuracy_top-5 : 95.73
+_base_ = [
+ '../_base_/models/tnt_s_patch16_224.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/default_runtime.py'
+]
+
+img_norm_cfg = dict(
+ mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True)
+
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='Resize',
+ size=(248, -1),
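+ # size=(248, -1) rescales the short edge to 248 while keeping the aspect ratio.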
+ interpolation='bicubic',
+ backend='pillow'),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+
+dataset_type = 'ImageNet'
+data = dict(
+ samples_per_gpu=64, workers_per_gpu=4, test=dict(pipeline=test_pipeline))
+
+# optimizer
+optimizer = dict(type='AdamW', lr=1e-3, weight_decay=0.05)
+optimizer_config = dict(grad_clip=None)
+
+lr_config = dict(
+ policy='CosineAnnealing',
+ min_lr=0,
+ warmup_by_epoch=True,
+ warmup='linear',
+ warmup_iters=5,
+ warmup_ratio=1e-3)
+runner = dict(type='EpochBasedRunner', max_epochs=300)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/tnt/tnt_s_patch16_224_evalonly_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/tnt/tnt_s_patch16_224_evalonly_imagenet.py
new file mode 100644
index 00000000..3c054d4a
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/tnt/tnt_s_patch16_224_evalonly_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'tnt-s-p16_16xb64_in1k.py'
+
+_deprecation_ = dict(
+ expected='tnt-s-p16_16xb64_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/twins/README.md b/openmmlab_test/mmclassification-0.24.1/configs/twins/README.md
new file mode 100644
index 00000000..87e72941
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/twins/README.md
@@ -0,0 +1,39 @@
+# Twins
+
+> [Twins: Revisiting the Design of Spatial Attention in Vision Transformers](https://arxiv.org/abs/2104.13840)
+
+
+
+## Abstract
+
+Very recently, a variety of vision transformer architectures for dense prediction tasks have been proposed and they show that the design of spatial attention is critical to their success in these tasks. In this work, we revisit the design of the spatial attention and demonstrate that a carefully devised yet simple spatial attention mechanism performs favourably against the state-of-the-art schemes. As a result, we propose two vision transformer architectures, namely, Twins-PCPVT and Twins-SVT. Our proposed architectures are highly efficient and easy to implement, only involving matrix multiplications that are highly optimized in modern deep learning frameworks. More importantly, the proposed architectures achieve excellent performance on a wide range of visual tasks, including image-level classification as well as dense detection and segmentation. The simplicity and strong performance suggest that our proposed architectures may serve as stronger backbones for many vision tasks. Our code is released at [Meituan-AutoML/Twins](https://github.com/Meituan-AutoML/Twins).
+
+
+
+
+
+## Results and models
+
+### ImageNet-1k
+
+| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+| :-----------: | :-------: | :------: | :-------: | :-------: | :-------------------------------------------------------------------------: | :---------------------------------------------------------------------------: |
+| PCPVT-small\* | 24.11 | 3.67 | 81.14 | 95.69 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/twins/twins-pcpvt-small_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/twins/twins-pcpvt-small_3rdparty_8xb128_in1k_20220126-ef23c132.pth) |
+| PCPVT-base\* | 43.83 | 6.45 | 82.66 | 96.26 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/twins/twins-pcpvt-base_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/twins/twins-pcpvt-base_3rdparty_8xb128_in1k_20220126-f8c4b0d5.pth) |
+| PCPVT-large\* | 60.99 | 9.51 | 83.09 | 96.59 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/twins/twins-pcpvt-large_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/twins/twins-pcpvt-large_3rdparty_16xb64_in1k_20220126-c1ef8d80.pth) |
+| SVT-small\* | 24.06 | 2.82 | 81.77 | 95.57 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/twins/twins-svt-small_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/twins/twins-svt-small_3rdparty_8xb128_in1k_20220126-8fe5205b.pth) |
+| SVT-base\* | 56.07 | 8.35 | 83.13 | 96.29 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/twins/twins-svt-base_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/twins/twins-svt-base_3rdparty_8xb128_in1k_20220126-e31cc8e9.pth) |
+| SVT-large\* | 99.27 | 14.82 | 83.60 | 96.50 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/twins/twins-svt-large_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/twins/twins-svt-large_3rdparty_16xb64_in1k_20220126-4817645f.pth) |
+
+*Models with * are converted from [the official repo](https://github.com/Meituan-AutoML/Twins). The config files of these models are only for validation. We do not guarantee their training accuracy and welcome you to contribute your reproduction results. The validation accuracy differs slightly from the official paper because of the PyTorch version: these results were obtained with PyTorch 1.9, while the official results were obtained with PyTorch 1.7.*
+
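+The Params column above can be reproduced by building a model from its config and counting its parameters (the Flops column comes from `tools/analysis_tools/get_flops.py`). Below is a minimal parameter-count sketch, assuming mmcls and mmcv are installed and the script runs from the repository root:
+
+```python
+from mmcv import Config
+from mmcls.models import build_classifier
+
+# Build PCPVT-small from its config (path relative to the repository root).
+cfg = Config.fromfile('configs/twins/twins-pcpvt-small_8xb128_in1k.py')
+model = build_classifier(cfg.model)
+
+# Count learnable parameters; about 24.11M is expected for PCPVT-small.
+num_params = sum(p.numel() for p in model.parameters())
+print(f'{num_params / 1e6:.2f}M parameters')
+```
+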
+## Citation
+
+```
+@article{chu2021twins,
+ title={Twins: Revisiting spatial attention design in vision transformers},
+ author={Chu, Xiangxiang and Tian, Zhi and Wang, Yuqing and Zhang, Bo and Ren, Haibing and Wei, Xiaolin and Xia, Huaxia and Shen, Chunhua},
+ journal={arXiv preprint arXiv:2104.13840},
+ year={2021}
+}
+```
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/twins/metafile.yml b/openmmlab_test/mmclassification-0.24.1/configs/twins/metafile.yml
new file mode 100644
index 00000000..f8a7d819
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/twins/metafile.yml
@@ -0,0 +1,114 @@
+Collections:
+ - Name: Twins
+ Metadata:
+ Training Data: ImageNet-1k
+ Architecture:
+ - Global Subsampled Attention
+ - Locally Grouped Self-Attention
+ - Conditional Position Encoding
+ - Pyramid Vision Transformer
+ Paper:
+ URL: https://arxiv.org/abs/2104.13840
+ Title: "Twins: Revisiting the Design of Spatial Attention in Vision Transformers"
+ README: configs/twins/README.md
+ Code:
+ URL: https://github.com/open-mmlab/mmclassification/blob/v0.20.1/mmcls/models/backbones/twins.py
+ Version: v0.20.1
+
+Models:
+ - Name: twins-pcpvt-small_3rdparty_8xb128_in1k
+ Metadata:
+ FLOPs: 3670000000 # 3.67G
+ Parameters: 24110000 # 24.11M
+ In Collection: Twins
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 81.14
+ Top 5 Accuracy: 95.69
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/twins/twins-pcpvt-small_3rdparty_8xb128_in1k_20220126-ef23c132.pth
+ Config: configs/twins/twins-pcpvt-small_8xb128_in1k.py
+ Converted From:
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_pcpvt_small-e70e7e7a.pth
+ Code: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/twins.py
+ - Name: twins-pcpvt-base_3rdparty_8xb128_in1k
+ Metadata:
+ FLOPs: 6450000000 # 6.45G
+ Parameters: 43830000 # 43.83M
+ In Collection: Twins
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 82.66
+ Top 5 Accuracy: 96.26
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/twins/twins-pcpvt-base_3rdparty_8xb128_in1k_20220126-f8c4b0d5.pth
+ Config: configs/twins/twins-pcpvt-base_8xb128_in1k.py
+ Converted From:
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_pcpvt_small-e70e7e7a.pth
+ Code: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/twins.py
+ - Name: twins-pcpvt-large_3rdparty_16xb64_in1k
+ Metadata:
+ FLOPs: 9510000000 # 9.51G
+ Parameters: 60990000 # 60.99M
+ In Collection: Twins
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 83.09
+ Top 5 Accuracy: 96.59
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/twins/twins-pcpvt-large_3rdparty_16xb64_in1k_20220126-c1ef8d80.pth
+ Config: configs/twins/twins-pcpvt-large_16xb64_in1k.py
+ Converted From:
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_pcpvt_small-e70e7e7a.pth
+ Code: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/twins.py
+ - Name: twins-svt-small_3rdparty_8xb128_in1k
+ Metadata:
+ FLOPs: 2820000000 # 2.82G
+ Parameters: 24060000 # 24.06M
+ In Collection: Twins
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 81.77
+ Top 5 Accuracy: 95.57
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/twins/twins-svt-small_3rdparty_8xb128_in1k_20220126-8fe5205b.pth
+ Config: configs/twins/twins-svt-small_8xb128_in1k.py
+ Converted From:
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_pcpvt_small-e70e7e7a.pth
+ Code: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/twins.py
+ - Name: twins-svt-base_3rdparty_8xb128_in1k
+ Metadata:
+ FLOPs: 8350000000 # 8.35G
+ Parameters: 56070000 # 56.07M
+ In Collection: Twins
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 83.13
+ Top 5 Accuracy: 96.29
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/twins/twins-svt-base_3rdparty_8xb128_in1k_20220126-e31cc8e9.pth
+ Config: configs/twins/twins-svt-base_8xb128_in1k.py
+ Converted From:
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_pcpvt_small-e70e7e7a.pth
+ Code: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/twins.py
+ - Name: twins-svt-large_3rdparty_16xb64_in1k
+ Metadata:
+ FLOPs: 14820000000 # 14.82G
+ Parameters: 99270000 # 99.27M
+ In Collection: Twins
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 83.60
+ Top 5 Accuracy: 96.50
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/twins/twins-svt-large_3rdparty_16xb64_in1k_20220126-4817645f.pth
+ Config: configs/twins/twins-svt-large_16xb64_in1k.py
+ Converted From:
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_pcpvt_small-e70e7e7a.pth
+ Code: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/twins.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/twins/twins-pcpvt-base_8xb128_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/twins/twins-pcpvt-base_8xb128_in1k.py
new file mode 100644
index 00000000..8ea9adc3
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/twins/twins-pcpvt-base_8xb128_in1k.py
@@ -0,0 +1,33 @@
+_base_ = [
+ '../_base_/models/twins_pcpvt_base.py',
+ '../_base_/datasets/imagenet_bs64_swin_224.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py'
+]
+
+data = dict(samples_per_gpu=128)
+
+paramwise_cfg = dict(_delete_=True, norm_decay_mult=0.0, bias_decay_mult=0.0)
+
+# batch size per GPU is 128, with 8 GPUs
+# lr = 5e-4 * 128 * 8 / 512 = 0.001
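+# (linear scaling rule: the 5e-4 base lr is defined for a total batch size of 512)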
+optimizer = dict(
+ type='AdamW',
+ lr=5e-4 * 128 * 8 / 512,
+ weight_decay=0.05,
+ eps=1e-8,
+ betas=(0.9, 0.999),
+ paramwise_cfg=paramwise_cfg)
+optimizer_config = dict(_delete_=True, grad_clip=dict(max_norm=5.0))
+
+# learning policy
+lr_config = dict(
+ policy='CosineAnnealing',
+ by_epoch=True,
+ min_lr_ratio=1e-2,
+ warmup='linear',
+ warmup_ratio=1e-3,
+ warmup_iters=5,
+ warmup_by_epoch=True)
+
+evaluation = dict(interval=1, metric='accuracy')
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/twins/twins-pcpvt-large_16xb64_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/twins/twins-pcpvt-large_16xb64_in1k.py
new file mode 100644
index 00000000..e9c9a35e
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/twins/twins-pcpvt-large_16xb64_in1k.py
@@ -0,0 +1,5 @@
+_base_ = ['twins-pcpvt-base_8xb128_in1k.py']
+
+model = dict(backbone=dict(arch='large'), head=dict(in_channels=512))
+
+data = dict(samples_per_gpu=64)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/twins/twins-pcpvt-small_8xb128_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/twins/twins-pcpvt-small_8xb128_in1k.py
new file mode 100644
index 00000000..cb8bdc38
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/twins/twins-pcpvt-small_8xb128_in1k.py
@@ -0,0 +1,3 @@
+_base_ = ['twins-pcpvt-base_8xb128_in1k.py']
+
+model = dict(backbone=dict(arch='small'), head=dict(in_channels=512))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/twins/twins-svt-base_8xb128_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/twins/twins-svt-base_8xb128_in1k.py
new file mode 100644
index 00000000..e2db2301
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/twins/twins-svt-base_8xb128_in1k.py
@@ -0,0 +1,33 @@
+_base_ = [
+ '../_base_/models/twins_svt_base.py',
+ '../_base_/datasets/imagenet_bs64_swin_224.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py'
+]
+
+data = dict(samples_per_gpu=128)
+
+paramwise_cfg = dict(_delete_=True, norm_decay_mult=0.0, bias_decay_mult=0.0)
+
+# batch size per GPU is 128, with 8 GPUs
+# lr = 5e-4 * 128 * 8 / 512 = 0.001
+optimizer = dict(
+ type='AdamW',
+ lr=5e-4 * 128 * 8 / 512,
+ weight_decay=0.05,
+ eps=1e-8,
+ betas=(0.9, 0.999),
+ paramwise_cfg=paramwise_cfg)
+optimizer_config = dict(_delete_=True, grad_clip=dict(max_norm=5.0))
+
+# learning policy
+lr_config = dict(
+ policy='CosineAnnealing',
+ by_epoch=True,
+ min_lr_ratio=1e-2,
+ warmup='linear',
+ warmup_ratio=1e-3,
+ warmup_iters=5,
+ warmup_by_epoch=True)
+
+evaluation = dict(interval=1, metric='accuracy')
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/twins/twins-svt-large_16xb64_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/twins/twins-svt-large_16xb64_in1k.py
new file mode 100644
index 00000000..9288a706
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/twins/twins-svt-large_16xb64_in1k.py
@@ -0,0 +1,5 @@
+_base_ = ['twins-svt-base_8xb128_in1k.py']
+
+data = dict(samples_per_gpu=64)
+
+model = dict(backbone=dict(arch='large'), head=dict(in_channels=1024))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/twins/twins-svt-small_8xb128_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/twins/twins-svt-small_8xb128_in1k.py
new file mode 100644
index 00000000..b92f1d3f
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/twins/twins-svt-small_8xb128_in1k.py
@@ -0,0 +1,3 @@
+_base_ = ['twins-svt-base_8xb128_in1k.py']
+
+model = dict(backbone=dict(arch='small'), head=dict(in_channels=512))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/van/README.md b/openmmlab_test/mmclassification-0.24.1/configs/van/README.md
new file mode 100644
index 00000000..a84cf329
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/van/README.md
@@ -0,0 +1,50 @@
+# Visual Attention Network
+
+> [Visual Attention Network](https://arxiv.org/pdf/2202.09741v2.pdf)
+
+
+
+## Abstract
+
+While originally designed for natural language processing (NLP) tasks, the self-attention mechanism has recently taken various computer vision areas by storm. However, the 2D nature of images brings three challenges for applying self-attention in computer vision. (1) Treating images as 1D sequences neglects their 2D structures. (2) The quadratic complexity is too expensive for high-resolution images. (3) It only captures spatial adaptability but ignores channel adaptability. In this paper, we propose a novel large kernel attention (LKA) module to enable self-adaptive and long-range correlations in self-attention while avoiding the above issues. We further introduce a novel neural network based on LKA, namely Visual Attention Network (VAN). While extremely simple and efficient, VAN outperforms the state-of-the-art vision transformers and convolutional neural networks by a large margin in extensive experiments, including image classification, object detection, semantic segmentation, instance segmentation, etc.
+
+
+
+
+
+## Results and models
+
+### ImageNet-1k
+
+| Model | Pretrain | resolution | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+| :------: | :----------: | :--------: | :-------: | :------: | :-------: | :-------: | :----------------------------------------------------------------: | :-------------------------------------------------------------------: |
+| VAN-B0\* | From scratch | 224x224 | 4.11 | 0.88 | 75.41 | 93.02 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/van/van-b0_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/van/van-tiny_8xb128_in1k_20220501-385941af.pth) |
+| VAN-B1\* | From scratch | 224x224 | 13.86 | 2.52 | 81.01 | 95.63 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/van/van-b1_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/van/van-small_8xb128_in1k_20220501-17bc91aa.pth) |
+| VAN-B2\* | From scratch | 224x224 | 26.58 | 5.03 | 82.80 | 96.21 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/van/van-b2_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/van/van-base_8xb128_in1k_20220501-6a4cc31b.pth) |
+| VAN-B3\* | From scratch | 224x224 | 44.77 | 8.99 | 83.86 | 96.73 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/van/van-b3_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/van/van-large_8xb128_in1k_20220501-f212ba21.pth) |
+| VAN-B4\* | From scratch | 224x224 | 60.28 | 12.22 | 84.13 | 96.86 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/van/van-b4_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/van/van-b4_3rdparty_in1k_20220909-f4665b92.pth) |
+
+\*Models with * are converted from [the official repo](https://github.com/Visual-Attention-Network/VAN-Classification). The config files of these models are only for validation. We do not guarantee their training accuracy and welcome you to contribute your reproduction results.
+
+### Pre-trained Models
+
+The pre-trained models on ImageNet-21k are used for fine-tuning on downstream tasks.
+
+| Model | Pretrain | resolution | Params(M) | Flops(G) | Download |
+| :------: | :----------: | :--------: | :-------: | :------: | :---------------------------------------------------------------------------------------------------------: |
+| VAN-B4\* | ImageNet-21k | 224x224 | 60.28 | 12.22 | [model](https://download.openmmlab.com/mmclassification/v0/van/van-b4_3rdparty_in21k_20220909-db926b18.pth) |
+| VAN-B5\* | ImageNet-21k | 224x224 | 89.97 | 17.21 | [model](https://download.openmmlab.com/mmclassification/v0/van/van-b5_3rdparty_in21k_20220909-18e904e3.pth) |
+| VAN-B6\* | ImageNet-21k | 224x224 | 283.9 | 55.28 | [model](https://download.openmmlab.com/mmclassification/v0/van/van-b6_3rdparty_in21k_20220909-96c2cb3a.pth) |
+
+\*Models with * are converted from [the official repo](https://github.com/Visual-Attention-Network/VAN-Classification).
+
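+To fine-tune from one of these ImageNet-21k checkpoints, the weights can be loaded through the backbone's `init_cfg`. This is an illustrative sketch rather than a provided config; in particular, `prefix='backbone'` assumes the checkpoint stores its weights under a `backbone.` namespace:
+
+```python
+_base_ = ['./van-b4_8xb128_in1k.py']
+
+# Initialize the backbone from the ImageNet-21k pre-trained weights listed above.
+model = dict(
+    backbone=dict(
+        init_cfg=dict(
+            type='Pretrained',
+            checkpoint='https://download.openmmlab.com/mmclassification/v0/van/van-b4_3rdparty_in21k_20220909-db926b18.pth',
+            prefix='backbone')))
+```
+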
+## Citation
+
+```
+@article{guo2022visual,
+ title={Visual Attention Network},
+ author={Guo, Meng-Hao and Lu, Cheng-Ze and Liu, Zheng-Ning and Cheng, Ming-Ming and Hu, Shi-Min},
+ journal={arXiv preprint arXiv:2202.09741},
+ year={2022}
+}
+```
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/van/metafile.yml b/openmmlab_test/mmclassification-0.24.1/configs/van/metafile.yml
new file mode 100644
index 00000000..c32df84a
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/van/metafile.yml
@@ -0,0 +1,84 @@
+Collections:
+ - Name: Visual-Attention-Network
+ Metadata:
+ Training Data: ImageNet-1k
+ Training Techniques:
+ - AdamW
+ - Weight Decay
+ Architecture:
+ - Visual Attention Network
+ - LKA
+ Paper:
+ URL: https://arxiv.org/pdf/2202.09741v2.pdf
+ Title: "Visual Attention Network"
+ README: configs/van/README.md
+ Code:
+ URL: https://github.com/open-mmlab/mmclassification/blob/v0.23.0/mmcls/models/backbones/van.py
+ Version: v0.23.0
+
+Models:
+ - Name: van-b0_3rdparty_in1k
+ Metadata:
+ FLOPs: 880000000 # 0.88G
+ Parameters: 4110000 # 4.11M
+ In Collection: Visual-Attention-Network
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 75.41
+ Top 5 Accuracy: 93.02
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/van/van-tiny_8xb128_in1k_20220501-385941af.pth
+ Config: configs/van/van-b0_8xb128_in1k.py
+ - Name: van-b1_3rdparty_in1k
+ Metadata:
+ FLOPs: 2520000000 # 2.52G
+ Parameters: 13860000 # 13.86M
+ In Collection: Visual-Attention-Network
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 81.01
+ Top 5 Accuracy: 95.63
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/van/van-small_8xb128_in1k_20220501-17bc91aa.pth
+ Config: configs/van/van-b1_8xb128_in1k.py
+ - Name: van-b2_3rdparty_in1k
+ Metadata:
+ FLOPs: 5030000000 # 5.03G
+ Parameters: 26580000 # 26.58M
+ In Collection: Visual-Attention-Network
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 82.80
+ Top 5 Accuracy: 96.21
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/van/van-base_8xb128_in1k_20220501-6a4cc31b.pth
+ Config: configs/van/van-b2_8xb128_in1k.py
+ - Name: van-b3_3rdparty_in1k
+ Metadata:
+ FLOPs: 8990000000 # 8.99G
+ Parameters: 44770000 # 44.77M
+ In Collection: Visual-Attention-Network
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 83.86
+ Top 5 Accuracy: 96.73
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/van/van-large_8xb128_in1k_20220501-f212ba21.pth
+ Config: configs/van/van-b3_8xb128_in1k.py
+ - Name: van-b4_3rdparty_in1k
+ Metadata:
+ FLOPs: 12220000000 # 12.22G
+ Parameters: 60280000 # 60.28M
+ In Collection: Visual-Attention-Network
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 84.13
+ Top 5 Accuracy: 96.86
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/van/van-b4_3rdparty_in1k_20220909-f4665b92.pth
+ Config: configs/van/van-b4_8xb128_in1k.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/van/van-b0_8xb128_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/van/van-b0_8xb128_in1k.py
new file mode 100644
index 00000000..1acb7af3
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/van/van-b0_8xb128_in1k.py
@@ -0,0 +1,61 @@
+_base_ = [
+ '../_base_/models/van/van_b0.py',
+ '../_base_/datasets/imagenet_bs64_swin_224.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py'
+]
+
+# Note that the mean and std used here are different from other configs
+img_norm_cfg = dict(
+ mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandomResizedCrop',
+ size=224,
+ backend='pillow',
+ interpolation='bicubic'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
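+ # The policy list below is resolved from the _base_ config files via the {{_base_.xxx}} syntax.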
+ dict(
+ type='RandAugment',
+ policies={{_base_.rand_increasing_policies}},
+ num_policies=2,
+ total_level=10,
+ magnitude_level=9,
+ magnitude_std=0.5,
+ hparams=dict(
+ pad_val=[round(x) for x in img_norm_cfg['mean'][::-1]],
+ interpolation='bicubic')),
+ dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4),
+ dict(
+ type='RandomErasing',
+ erase_prob=0.25,
+ mode='rand',
+ min_area_ratio=0.02,
+ max_area_ratio=1 / 3,
+ fill_color=img_norm_cfg['mean'][::-1],
+ fill_std=img_norm_cfg['std'][::-1]),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='Resize',
+ size=(248, -1),
+ backend='pillow',
+ interpolation='bicubic'),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+
+data = dict(
+ samples_per_gpu=128,
+ train=dict(pipeline=train_pipeline),
+ val=dict(pipeline=test_pipeline),
+ test=dict(pipeline=test_pipeline))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/van/van-b1_8xb128_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/van/van-b1_8xb128_in1k.py
new file mode 100644
index 00000000..64483db8
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/van/van-b1_8xb128_in1k.py
@@ -0,0 +1,61 @@
+_base_ = [
+ '../_base_/models/van/van_b1.py',
+ '../_base_/datasets/imagenet_bs64_swin_224.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py'
+]
+
+# Note that the mean and std used here are different from other configs
+img_norm_cfg = dict(
+ mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandomResizedCrop',
+ size=224,
+ backend='pillow',
+ interpolation='bicubic'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(
+ type='RandAugment',
+ policies={{_base_.rand_increasing_policies}},
+ num_policies=2,
+ total_level=10,
+ magnitude_level=9,
+ magnitude_std=0.5,
+ hparams=dict(
+ pad_val=[round(x) for x in img_norm_cfg['mean'][::-1]],
+ interpolation='bicubic')),
+ dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4),
+ dict(
+ type='RandomErasing',
+ erase_prob=0.25,
+ mode='rand',
+ min_area_ratio=0.02,
+ max_area_ratio=1 / 3,
+ fill_color=img_norm_cfg['mean'][::-1],
+ fill_std=img_norm_cfg['std'][::-1]),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='Resize',
+ size=(248, -1),
+ backend='pillow',
+ interpolation='bicubic'),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+
+data = dict(
+ samples_per_gpu=128,
+ train=dict(pipeline=train_pipeline),
+ val=dict(pipeline=test_pipeline),
+ test=dict(pipeline=test_pipeline))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/van/van-b2_8xb128_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/van/van-b2_8xb128_in1k.py
new file mode 100644
index 00000000..88493dc2
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/van/van-b2_8xb128_in1k.py
@@ -0,0 +1,61 @@
+_base_ = [
+ '../_base_/models/van/van_b2.py',
+ '../_base_/datasets/imagenet_bs64_swin_224.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py'
+]
+
+# Note that the mean and std used here are different from other configs
+img_norm_cfg = dict(
+ mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandomResizedCrop',
+ size=224,
+ backend='pillow',
+ interpolation='bicubic'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(
+ type='RandAugment',
+ policies={{_base_.rand_increasing_policies}},
+ num_policies=2,
+ total_level=10,
+ magnitude_level=9,
+ magnitude_std=0.5,
+ hparams=dict(
+ pad_val=[round(x) for x in img_norm_cfg['mean'][::-1]],
+ interpolation='bicubic')),
+ dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4),
+ dict(
+ type='RandomErasing',
+ erase_prob=0.25,
+ mode='rand',
+ min_area_ratio=0.02,
+ max_area_ratio=1 / 3,
+ fill_color=img_norm_cfg['mean'][::-1],
+ fill_std=img_norm_cfg['std'][::-1]),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='Resize',
+ size=(248, -1),
+ backend='pillow',
+ interpolation='bicubic'),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+
+data = dict(
+ samples_per_gpu=128,
+ train=dict(pipeline=train_pipeline),
+ val=dict(pipeline=test_pipeline),
+ test=dict(pipeline=test_pipeline))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/van/van-b3_8xb128_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/van/van-b3_8xb128_in1k.py
new file mode 100644
index 00000000..6b415f65
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/van/van-b3_8xb128_in1k.py
@@ -0,0 +1,61 @@
+_base_ = [
+ '../_base_/models/van/van_b3.py',
+ '../_base_/datasets/imagenet_bs64_swin_224.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py'
+]
+
+# Note that the mean and std used here are different from other configs
+img_norm_cfg = dict(
+ mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandomResizedCrop',
+ size=224,
+ backend='pillow',
+ interpolation='bicubic'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(
+ type='RandAugment',
+ policies={{_base_.rand_increasing_policies}},
+ num_policies=2,
+ total_level=10,
+ magnitude_level=9,
+ magnitude_std=0.5,
+ hparams=dict(
+ pad_val=[round(x) for x in img_norm_cfg['mean'][::-1]],
+ interpolation='bicubic')),
+ dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4),
+ dict(
+ type='RandomErasing',
+ erase_prob=0.25,
+ mode='rand',
+ min_area_ratio=0.02,
+ max_area_ratio=1 / 3,
+ fill_color=img_norm_cfg['mean'][::-1],
+ fill_std=img_norm_cfg['std'][::-1]),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='Resize',
+ size=(248, -1),
+ backend='pillow',
+ interpolation='bicubic'),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+
+data = dict(
+ samples_per_gpu=128,
+ train=dict(pipeline=train_pipeline),
+ val=dict(pipeline=test_pipeline),
+ test=dict(pipeline=test_pipeline))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/van/van-b4_8xb128_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/van/van-b4_8xb128_in1k.py
new file mode 100644
index 00000000..ba8914f8
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/van/van-b4_8xb128_in1k.py
@@ -0,0 +1,61 @@
+_base_ = [
+ '../_base_/models/van/van_b4.py',
+ '../_base_/datasets/imagenet_bs64_swin_224.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py'
+]
+
+# Note that the mean and std used here are different from other configs
+img_norm_cfg = dict(
+ mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandomResizedCrop',
+ size=224,
+ backend='pillow',
+ interpolation='bicubic'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(
+ type='RandAugment',
+ policies={{_base_.rand_increasing_policies}},
+ num_policies=2,
+ total_level=10,
+ magnitude_level=9,
+ magnitude_std=0.5,
+ hparams=dict(
+ pad_val=[round(x) for x in img_norm_cfg['mean'][::-1]],
+ interpolation='bicubic')),
+ dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4),
+ dict(
+ type='RandomErasing',
+ erase_prob=0.25,
+ mode='rand',
+ min_area_ratio=0.02,
+ max_area_ratio=1 / 3,
+ fill_color=img_norm_cfg['mean'][::-1],
+ fill_std=img_norm_cfg['std'][::-1]),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='Resize',
+ size=(248, -1),
+ backend='pillow',
+ interpolation='bicubic'),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+
+data = dict(
+ samples_per_gpu=128,
+ train=dict(pipeline=train_pipeline),
+ val=dict(pipeline=test_pipeline),
+ test=dict(pipeline=test_pipeline))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/van/van-base_8xb128_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/van/van-base_8xb128_in1k.py
new file mode 100644
index 00000000..e331980d
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/van/van-base_8xb128_in1k.py
@@ -0,0 +1,6 @@
+_base_ = ['./van-b2_8xb128_in1k.py']
+
+_deprecation_ = dict(
+ expected='van-b2_8xb128_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/1017',
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/van/van-large_8xb128_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/van/van-large_8xb128_in1k.py
new file mode 100644
index 00000000..84f8c7ed
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/van/van-large_8xb128_in1k.py
@@ -0,0 +1,6 @@
+_base_ = ['./van-b3_8xb128_in1k.py']
+
+_deprecation_ = dict(
+ expected='van-b3_8xb128_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/1017',
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/van/van-small_8xb128_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/van/van-small_8xb128_in1k.py
new file mode 100644
index 00000000..75d3220b
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/van/van-small_8xb128_in1k.py
@@ -0,0 +1,6 @@
+_base_ = ['./van-b1_8xb128_in1k.py']
+
+_deprecation_ = dict(
+ expected='van-b1_8xb128_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/1017',
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/van/van-tiny_8xb128_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/van/van-tiny_8xb128_in1k.py
new file mode 100644
index 00000000..9f83e77c
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/van/van-tiny_8xb128_in1k.py
@@ -0,0 +1,6 @@
+_base_ = ['./van-b0_8xb128_in1k.py']
+
+_deprecation_ = dict(
+ expected='van-b0_8xb128_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/1017',
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/vgg/README.md b/openmmlab_test/mmclassification-0.24.1/configs/vgg/README.md
new file mode 100644
index 00000000..454489ff
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/vgg/README.md
@@ -0,0 +1,39 @@
+# VGG
+
+> [Very Deep Convolutional Networks for Large-Scale Image Recognition](https://arxiv.org/abs/1409.1556)
+
+
+
+## Abstract
+
+In this work we investigate the effect of the convolutional network depth on its accuracy in the large-scale image recognition setting. Our main contribution is a thorough evaluation of networks of increasing depth using an architecture with very small (3x3) convolution filters, which shows that a significant improvement on the prior-art configurations can be achieved by pushing the depth to 16-19 weight layers. These findings were the basis of our ImageNet Challenge 2014 submission, where our team secured the first and the second places in the localisation and classification tracks respectively. We also show that our representations generalise well to other datasets, where they achieve state-of-the-art results. We have made our two best-performing ConvNet models publicly available to facilitate further research on the use of deep visual representations in computer vision.
+
+
+
+
+
+## Results and models
+
+### ImageNet-1k
+
+| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+| :-------: | :-------: | :------: | :-------: | :-------: | :---------------------------------------------------------------------------: | :-----------------------------------------------------------------------------: |
+| VGG-11 | 132.86 | 7.63 | 68.75 | 88.87 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg11_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_batch256_imagenet_20210208-4271cd6c.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_batch256_imagenet_20210208-4271cd6c.log.json) |
+| VGG-13 | 133.05 | 11.34 | 70.02 | 89.46 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg13_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_batch256_imagenet_20210208-4d1d6080.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_batch256_imagenet_20210208-4d1d6080.log.json) |
+| VGG-16 | 138.36 | 15.5 | 71.62 | 90.49 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg16_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_batch256_imagenet_20210208-db26f1a5.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_batch256_imagenet_20210208-db26f1a5.log.json) |
+| VGG-19 | 143.67 | 19.67 | 72.41 | 90.80 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg19_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_batch256_imagenet_20210208-e6920e4a.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_batch256_imagenet_20210208-e6920e4a.log.json) |
+| VGG-11-BN | 132.87 | 7.64 | 70.67 | 90.16 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg11bn_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_bn_batch256_imagenet_20210207-f244902c.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_bn_batch256_imagenet_20210207-f244902c.log.json) |
+| VGG-13-BN | 133.05 | 11.36 | 72.12 | 90.66 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg13bn_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_bn_batch256_imagenet_20210207-1a8b7864.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_bn_batch256_imagenet_20210207-1a8b7864.log.json) |
+| VGG-16-BN | 138.37 | 15.53 | 73.74 | 91.66 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg16_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_bn_batch256_imagenet_20210208-7e55cd29.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_bn_batch256_imagenet_20210208-7e55cd29.log.json) |
+| VGG-19-BN | 143.68 | 19.7 | 74.68 | 92.27 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg19bn_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_bn_batch256_imagenet_20210208-da620c4f.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_bn_batch256_imagenet_20210208-da620c4f.log.json) |
+
+## Citation
+
+```
+@article{simonyan2014very,
+ title={Very deep convolutional networks for large-scale image recognition},
+ author={Simonyan, Karen and Zisserman, Andrew},
+ journal={arXiv preprint arXiv:1409.1556},
+ year={2014}
+}
+```
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/vgg/metafile.yml b/openmmlab_test/mmclassification-0.24.1/configs/vgg/metafile.yml
new file mode 100644
index 00000000..4410c950
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/vgg/metafile.yml
@@ -0,0 +1,125 @@
+Collections:
+ - Name: VGG
+ Metadata:
+ Training Data: ImageNet-1k
+ Training Techniques:
+ - SGD with Momentum
+ - Weight Decay
+ Training Resources: 8x Xp GPUs
+ Epochs: 100
+ Batch Size: 256
+ Architecture:
+ - VGG
+ Paper:
+ URL: https://arxiv.org/abs/1409.1556
+ Title: "Very Deep Convolutional Networks for Large-Scale Image"
+ README: configs/vgg/README.md
+ Code:
+ URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/vgg.py#L39
+ Version: v0.15.0
+
+Models:
+ - Name: vgg11_8xb32_in1k
+ Metadata:
+ FLOPs: 7630000000
+ Parameters: 132860000
+ In Collection: VGG
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 68.75
+ Top 5 Accuracy: 88.87
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_batch256_imagenet_20210208-4271cd6c.pth
+ Config: configs/vgg/vgg11_8xb32_in1k.py
+ - Name: vgg13_8xb32_in1k
+ Metadata:
+ FLOPs: 11340000000
+ Parameters: 133050000
+ In Collection: VGG
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 70.02
+ Top 5 Accuracy: 89.46
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_batch256_imagenet_20210208-4d1d6080.pth
+ Config: configs/vgg/vgg13_8xb32_in1k.py
+ - Name: vgg16_8xb32_in1k
+ Metadata:
+ FLOPs: 15500000000
+ Parameters: 138360000
+ In Collection: VGG
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 71.62
+ Top 5 Accuracy: 90.49
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_batch256_imagenet_20210208-db26f1a5.pth
+ Config: configs/vgg/vgg16_8xb32_in1k.py
+ - Name: vgg19_8xb32_in1k
+ Metadata:
+ FLOPs: 19670000000
+ Parameters: 143670000
+ In Collection: VGG
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 72.41
+ Top 5 Accuracy: 90.8
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_batch256_imagenet_20210208-e6920e4a.pth
+ Config: configs/vgg/vgg19_8xb32_in1k.py
+ - Name: vgg11bn_8xb32_in1k
+ Metadata:
+ FLOPs: 7640000000
+ Parameters: 132870000
+ In Collection: VGG
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 70.67
+ Top 5 Accuracy: 90.16
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_bn_batch256_imagenet_20210207-f244902c.pth
+ Config: configs/vgg/vgg11bn_8xb32_in1k.py
+ - Name: vgg13bn_8xb32_in1k
+ Metadata:
+ FLOPs: 11360000000
+ Parameters: 133050000
+ In Collection: VGG
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 72.12
+ Top 5 Accuracy: 90.66
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_bn_batch256_imagenet_20210207-1a8b7864.pth
+ Config: configs/vgg/vgg13bn_8xb32_in1k.py
+ - Name: vgg16bn_8xb32_in1k
+ Metadata:
+ FLOPs: 15530000000
+ Parameters: 138370000
+ In Collection: VGG
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 73.74
+ Top 5 Accuracy: 91.66
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_bn_batch256_imagenet_20210208-7e55cd29.pth
+ Config: configs/vgg/vgg16bn_8xb32_in1k.py
+ - Name: vgg19bn_8xb32_in1k
+ Metadata:
+ FLOPs: 19700000000
+ Parameters: 143680000
+ In Collection: VGG
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 74.68
+ Top 5 Accuracy: 92.27
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_bn_batch256_imagenet_20210208-da620c4f.pth
+ Config: configs/vgg/vgg19bn_8xb32_in1k.py
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/vgg/vgg11_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg11_8xb32_in1k.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/vgg/vgg11_b32x8_imagenet.py
rename to openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg11_8xb32_in1k.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg11_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg11_b32x8_imagenet.py
new file mode 100644
index 00000000..b15396be
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg11_b32x8_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'vgg11_8xb32_in1k.py'
+
+_deprecation_ = dict(
+ expected='vgg11_8xb32_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/vgg/vgg11bn_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg11bn_8xb32_in1k.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/vgg/vgg11bn_b32x8_imagenet.py
rename to openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg11bn_8xb32_in1k.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg11bn_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg11bn_b32x8_imagenet.py
new file mode 100644
index 00000000..350c9bef
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg11bn_b32x8_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'vgg11bn_8xb32_in1k.py'
+
+_deprecation_ = dict(
+ expected='vgg11bn_8xb32_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/vgg/vgg13_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg13_8xb32_in1k.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/vgg/vgg13_b32x8_imagenet.py
rename to openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg13_8xb32_in1k.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg13_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg13_b32x8_imagenet.py
new file mode 100644
index 00000000..6198ca2c
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg13_b32x8_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'vgg13_8xb32_in1k.py'
+
+_deprecation_ = dict(
+ expected='vgg13_8xb32_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/vgg/vgg13bn_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg13bn_8xb32_in1k.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/vgg/vgg13bn_b32x8_imagenet.py
rename to openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg13bn_8xb32_in1k.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg13bn_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg13bn_b32x8_imagenet.py
new file mode 100644
index 00000000..0a715d7f
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg13bn_b32x8_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'vgg13bn_8xb32_in1k.py'
+
+_deprecation_ = dict(
+ expected='vgg13bn_8xb32_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/vgg/vgg16_b16x8_voc.py b/openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg16_8xb16_voc.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/vgg/vgg16_b16x8_voc.py
rename to openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg16_8xb16_voc.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg16_8xb32_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg16_8xb32_in1k.py
new file mode 100644
index 00000000..a477db37
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg16_8xb32_in1k.py
@@ -0,0 +1,7 @@
+_base_ = [
+ '../_base_/models/vgg16bn.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
+optimizer = dict(lr=0.01)
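+# fp16 enables mixed-precision training (mmcv FP16 hook) with a static loss scale.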
+fp16 = dict(loss_scale=512.)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg16_b16x8_voc.py b/openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg16_b16x8_voc.py
new file mode 100644
index 00000000..06225e72
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg16_b16x8_voc.py
@@ -0,0 +1,6 @@
+_base_ = 'vgg16_8xb16_voc.py'
+
+_deprecation_ = dict(
+ expected='vgg16_8xb16_voc.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg16_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg16_b32x8_imagenet.py
new file mode 100644
index 00000000..2fefb949
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg16_b32x8_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'vgg16_8xb32_in1k.py'
+
+_deprecation_ = dict(
+ expected='vgg16_8xb32_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/vgg/vgg16bn_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg16bn_8xb32_in1k.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/vgg/vgg16bn_b32x8_imagenet.py
rename to openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg16bn_8xb32_in1k.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg16bn_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg16bn_b32x8_imagenet.py
new file mode 100644
index 00000000..cb21917f
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg16bn_b32x8_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'vgg16bn_8xb32_in1k.py'
+
+_deprecation_ = dict(
+ expected='vgg16bn_8xb32_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/vgg/vgg19_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg19_8xb32_in1k.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/vgg/vgg19_b32x8_imagenet.py
rename to openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg19_8xb32_in1k.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg19_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg19_b32x8_imagenet.py
new file mode 100644
index 00000000..e8b8b25a
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg19_b32x8_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'vgg19_8xb32_in1k.py'
+
+_deprecation_ = dict(
+ expected='vgg19_8xb32_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-speed-benchmark/configs/vgg/vgg19bn_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg19bn_8xb32_in1k.py
similarity index 100%
rename from openmmlab_test/mmclassification-speed-benchmark/configs/vgg/vgg19bn_b32x8_imagenet.py
rename to openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg19bn_8xb32_in1k.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg19bn_b32x8_imagenet.py b/openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg19bn_b32x8_imagenet.py
new file mode 100644
index 00000000..f615496c
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/vgg/vgg19bn_b32x8_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = 'vgg19bn_8xb32_in1k.py'
+
+_deprecation_ = dict(
+ expected='vgg19bn_8xb32_in1k.py',
+ reference='https://github.com/open-mmlab/mmclassification/pull/508',
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/vision_transformer/README.md b/openmmlab_test/mmclassification-0.24.1/configs/vision_transformer/README.md
new file mode 100644
index 00000000..c35c242e
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/vision_transformer/README.md
@@ -0,0 +1,57 @@
+# Vision Transformer
+
+> [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/pdf/2010.11929.pdf)
+
+
+
+## Abstract
+
+While the Transformer architecture has become the de-facto standard for natural language processing tasks, its applications to computer vision remain limited. In vision, attention is either applied in conjunction with convolutional networks, or used to replace certain components of convolutional networks while keeping their overall structure in place. We show that this reliance on CNNs is not necessary and a pure transformer applied directly to sequences of image patches can perform very well on image classification tasks. When pre-trained on large amounts of data and transferred to multiple mid-sized or small image recognition benchmarks (ImageNet, CIFAR-100, VTAB, etc.), Vision Transformer (ViT) attains excellent results compared to state-of-the-art convolutional networks while requiring substantially fewer computational resources to train.
+
+
+
+
+
+## Results and models
+
+The training of Vision Transformers is divided into two steps. The first step
+is to pre-train the model on a large dataset, like ImageNet-21k, to obtain the
+pre-trained weights. The second step is to fine-tune the model on the target
+dataset, like ImageNet-1k, to obtain the fine-tuned model. Here, we provide
+both the pre-trained models and the fine-tuned models.
+
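+As a concrete example of the second step, a fine-tuning config amounts to inheriting a fine-tune recipe and loading the pre-trained weights. This is an illustrative sketch rather than a provided config; the `load_from` URL is the ViT-B16 ImageNet-21k checkpoint from the table below:
+
+```python
+_base_ = ['./vit-base-p16_ft-64xb64_in1k-384.py']
+
+# Start fine-tuning from the ImageNet-21k pre-trained checkpoint.
+load_from = 'https://download.openmmlab.com/mmclassification/v0/vit/pretrain/vit-base-p16_3rdparty_pt-64xb64_in1k-224_20210928-02284250.pth'
+```
+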
+### ImageNet-21k
+
+The pre-trained models on ImageNet-21k are only used for fine-tuning, and therefore have no evaluation results.
+
+| Model | resolution | Params(M) | Flops(G) | Download |
+| :-------: | :--------: | :-------: | :------: | :--------------------------------------------------------------------------------------------------------------------------------------: |
+| ViT-B16\* | 224x224 | 86.86 | 33.03 | [model](https://download.openmmlab.com/mmclassification/v0/vit/pretrain/vit-base-p16_3rdparty_pt-64xb64_in1k-224_20210928-02284250.pth) |
+| ViT-B32\* | 224x224 | 88.30 | 8.56 | [model](https://download.openmmlab.com/mmclassification/v0/vit/pretrain/vit-base-p32_3rdparty_pt-64xb64_in1k-224_20210928-eee25dd4.pth) |
+| ViT-L16\* | 224x224 | 304.72 | 116.68 | [model](https://download.openmmlab.com/mmclassification/v0/vit/pretrain/vit-large-p16_3rdparty_pt-64xb64_in1k-224_20210928-0001f9a1.pth) |
+
+*Models with * are converted from the [official repo](https://github.com/google-research/vision_transformer#available-vit-models).*
+
+### ImageNet-1k
+
+| Model | Pretrain | resolution | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+| :-----------: | :----------: | :--------: | :-------: | :------: | :-------: | :-------: | :--------------------------------------------------------------: | :----------------------------------------------------------------: |
+| ViT-B16\* | ImageNet-21k | 384x384 | 86.86 | 33.03 | 85.43 | 97.77 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vision_transformer/vit-base-p16_ft-64xb64_in1k-384.py) | [model](https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-base-p16_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-98e8652b.pth) |
+| ViT-B32\* | ImageNet-21k | 384x384 | 88.30 | 8.56 | 84.01 | 97.08 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vision_transformer/vit-base-p32_ft-64xb64_in1k-384.py) | [model](https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-base-p32_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-9cea8599.pth) |
+| ViT-L16\* | ImageNet-21k | 384x384 | 304.72 | 116.68 | 85.63 | 97.63 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vision_transformer/vit-large-p16_ft-64xb64_in1k-384.py) | [model](https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-large-p16_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-b20ba619.pth) |
+| ViT-B16 (IPU) | ImageNet-21k | 224x224 | 86.86 | 33.03 | 81.22 | 95.56 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vision_transformer/vit-base-p16_ft-4xb544-ipu_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vit/vit-base-p16_ft-4xb544-ipu_in1k_20220603-c215811a.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/vit/vit-base-p16_ft-4xb544-ipu_in1k.log) |
+
+*Models with * are converted from the [official repo](https://github.com/google-research/vision_transformer#available-vit-models). The config files of these models are only for validation. We do not guarantee their training accuracy and welcome you to contribute your reproduction results.*
+
+## Citation
+
+```bibtex
+@inproceedings{
+ dosovitskiy2021an,
+ title={An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale},
+ author={Alexey Dosovitskiy and Lucas Beyer and Alexander Kolesnikov and Dirk Weissenborn and Xiaohua Zhai and Thomas Unterthiner and Mostafa Dehghani and Matthias Minderer and Georg Heigold and Sylvain Gelly and Jakob Uszkoreit and Neil Houlsby},
+ booktitle={International Conference on Learning Representations},
+ year={2021},
+ url={https://openreview.net/forum?id=YicbFdNTTy}
+}
+```
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/vision_transformer/metafile.yml b/openmmlab_test/mmclassification-0.24.1/configs/vision_transformer/metafile.yml
new file mode 100644
index 00000000..9ac80469
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/vision_transformer/metafile.yml
@@ -0,0 +1,79 @@
+Collections:
+ - Name: Vision Transformer
+ Metadata:
+ Architecture:
+ - Attention Dropout
+ - Convolution
+ - Dense Connections
+ - Dropout
+ - GELU
+ - Layer Normalization
+ - Multi-Head Attention
+ - Scaled Dot-Product Attention
+ - Tanh Activation
+ Paper:
+ URL: https://arxiv.org/pdf/2010.11929.pdf
+ Title: 'An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale'
+ README: configs/vision_transformer/README.md
+ Code:
+ URL: https://github.com/open-mmlab/mmclassification/blob/v0.17.0/mmcls/models/backbones/vision_transformer.py
+ Version: v0.17.0
+
+Models:
+ - Name: vit-base-p16_in21k-pre-3rdparty_ft-64xb64_in1k-384
+ In Collection: Vision Transformer
+ Metadata:
+ FLOPs: 33030000000
+ Parameters: 86860000
+ Training Data:
+ - ImageNet-21k
+ - ImageNet-1k
+ Results:
+ - Dataset: ImageNet-1k
+ Task: Image Classification
+ Metrics:
+ Top 1 Accuracy: 85.43
+ Top 5 Accuracy: 97.77
+ Weights: https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-base-p16_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-98e8652b.pth
+ Converted From:
+ Weights: https://console.cloud.google.com/storage/browser/_details/vit_models/augreg/B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz
+ Code: https://github.com/google-research/vision_transformer/blob/88a52f8892c80c10de99194990a517b4d80485fd/vit_jax/models.py#L208
+ Config: configs/vision_transformer/vit-base-p16_ft-64xb64_in1k-384.py
+ - Name: vit-base-p32_in21k-pre-3rdparty_ft-64xb64_in1k-384
+ In Collection: Vision Transformer
+ Metadata:
+ FLOPs: 8560000000
+ Parameters: 88300000
+ Training Data:
+ - ImageNet-21k
+ - ImageNet-1k
+ Results:
+ - Dataset: ImageNet-1k
+ Task: Image Classification
+ Metrics:
+ Top 1 Accuracy: 84.01
+ Top 5 Accuracy: 97.08
+ Weights: https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-base-p32_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-9cea8599.pth
+ Converted From:
+ Weights: https://console.cloud.google.com/storage/browser/_details/vit_models/augreg/B_32-i21k-300ep-lr_0.001-aug_light1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz
+ Code: https://github.com/google-research/vision_transformer/blob/88a52f8892c80c10de99194990a517b4d80485fd/vit_jax/models.py#L208
+ Config: configs/vision_transformer/vit-base-p32_ft-64xb64_in1k-384.py
+ - Name: vit-large-p16_in21k-pre-3rdparty_ft-64xb64_in1k-384
+ In Collection: Vision Transformer
+ Metadata:
+ FLOPs: 116680000000
+ Parameters: 304720000
+ Training Data:
+ - ImageNet-21k
+ - ImageNet-1k
+ Results:
+ - Dataset: ImageNet-1k
+ Task: Image Classification
+ Metrics:
+ Top 1 Accuracy: 85.63
+ Top 5 Accuracy: 97.63
+ Weights: https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-large-p16_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-b20ba619.pth
+ Converted From:
+ Weights: https://console.cloud.google.com/storage/browser/_details/vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_strong1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz
+ Code: https://github.com/google-research/vision_transformer/blob/88a52f8892c80c10de99194990a517b4d80485fd/vit_jax/models.py#L208
+ Config: configs/vision_transformer/vit-large-p16_ft-64xb64_in1k-384.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/vision_transformer/vit-base-p16_ft-4xb544-ipu_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/vision_transformer/vit-base-p16_ft-4xb544-ipu_in1k.py
new file mode 100644
index 00000000..097d8d6b
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/vision_transformer/vit-base-p16_ft-4xb544-ipu_in1k.py
@@ -0,0 +1,115 @@
+_base_ = [
+ '../_base_/models/vit-base-p16.py',
+ '../_base_/datasets/imagenet_bs64_pil_resize_autoaug.py',
+ '../_base_/default_runtime.py'
+]
+
+# specific to vit pretrain
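+# exclude the class token and the position embedding from weight decay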
+paramwise_cfg = dict(custom_keys={
+ '.cls_token': dict(decay_mult=0.0),
+ '.pos_embed': dict(decay_mult=0.0)
+})
+
+pretrained = 'https://download.openmmlab.com/mmclassification/v0/vit/pretrain/vit-base-p16_3rdparty_pt-64xb64_in1k-224_20210928-02284250.pth' # noqa
+
+model = dict(
+ head=dict(
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0, _delete_=True), ),
+ backbone=dict(
+ img_size=224,
+ init_cfg=dict(
+ type='Pretrained',
+ checkpoint=pretrained,
+ _delete_=True,
+ prefix='backbone')))
+
+img_norm_cfg = dict(
+ mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True)
+
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='RandomResizedCrop', size=224, backend='pillow'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='ToHalf', keys=['img']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='Resize', size=(224, -1), backend='pillow'),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToHalf', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+
+# change batch size
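+# 17 samples per compute step x 32 gradient-accumulation steps = 544 samples
+# per weight update, hence the "4xb544" in the file name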
+data = dict(
+ samples_per_gpu=17,
+ workers_per_gpu=16,
+ drop_last=True,
+ train=dict(pipeline=train_pipeline),
+ train_dataloader=dict(mode='async'),
+ val=dict(pipeline=test_pipeline, ),
+ val_dataloader=dict(samples_per_gpu=4, workers_per_gpu=1),
+ test=dict(pipeline=test_pipeline),
+ test_dataloader=dict(samples_per_gpu=4, workers_per_gpu=1))
+
+# remove clip-norm
+optimizer_config = dict()
+
+# optimizer
+optimizer = dict(
+ type='SGD',
+ lr=0.08,
+ weight_decay=1e-5,
+ momentum=0.9,
+ paramwise_cfg=paramwise_cfg,
+)
+
+# learning policy
+lr_config = dict(
+ policy='CosineAnnealing',
+ min_lr=0,
+ warmup='linear',
+ warmup_iters=800,
+ warmup_ratio=0.02,
+)
+
+# ipu cfg
+# model partition config
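+# each split edge marks the layer from which execution moves to the given
+# IPU, pipelining the 12 transformer layers roughly three per device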
+ipu_model_cfg = dict(
+ train_split_edges=[
+ dict(layer_to_call='backbone.patch_embed', ipu_id=0),
+ dict(layer_to_call='backbone.layers.3', ipu_id=1),
+ dict(layer_to_call='backbone.layers.6', ipu_id=2),
+ dict(layer_to_call='backbone.layers.9', ipu_id=3)
+ ],
+ train_ckpt_nodes=['backbone.layers.{}'.format(i) for i in range(12)])
+
+# device config
+options_cfg = dict(
+ randomSeed=42,
+ partialsType='half',
+ train_cfg=dict(
+ executionStrategy='SameAsIpu',
+ Training=dict(gradientAccumulation=32),
+ availableMemoryProportion=[0.3, 0.3, 0.3, 0.3],
+ ),
+ eval_cfg=dict(deviceIterations=1, ),
+)
+
+# add model partition config and device config to runner
+runner = dict(
+ type='IterBasedRunner',
+ ipu_model_cfg=ipu_model_cfg,
+ options_cfg=options_cfg,
+ max_iters=5000)
+
+checkpoint_config = dict(interval=1000)
+
+fp16 = dict(loss_scale=256.0, velocity_accum_type='half', accum_type='half')
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/vision_transformer/vit-base-p16_ft-64xb64_in1k-384.py b/openmmlab_test/mmclassification-0.24.1/configs/vision_transformer/vit-base-p16_ft-64xb64_in1k-384.py
new file mode 100644
index 00000000..cb42d0d8
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/vision_transformer/vit-base-p16_ft-64xb64_in1k-384.py
@@ -0,0 +1,36 @@
+_base_ = [
+ '../_base_/models/vit-base-p16.py',
+ '../_base_/datasets/imagenet_bs64_pil_resize_autoaug.py',
+ '../_base_/schedules/imagenet_bs4096_AdamW.py',
+ '../_base_/default_runtime.py'
+]
+
+model = dict(backbone=dict(img_size=384))
+
+img_norm_cfg = dict(
+ mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True)
+
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='RandomResizedCrop', size=384, backend='pillow'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='Resize', size=(384, -1), backend='pillow'),
+ dict(type='CenterCrop', crop_size=384),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+
+data = dict(
+ train=dict(pipeline=train_pipeline),
+ val=dict(pipeline=test_pipeline),
+ test=dict(pipeline=test_pipeline),
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/vision_transformer/vit-base-p16_pt-64xb64_in1k-224.py b/openmmlab_test/mmclassification-0.24.1/configs/vision_transformer/vit-base-p16_pt-64xb64_in1k-224.py
new file mode 100644
index 00000000..79c323b1
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/vision_transformer/vit-base-p16_pt-64xb64_in1k-224.py
@@ -0,0 +1,12 @@
+_base_ = [
+ '../_base_/models/vit-base-p16.py',
+ '../_base_/datasets/imagenet_bs64_pil_resize_autoaug.py',
+ '../_base_/schedules/imagenet_bs4096_AdamW.py',
+ '../_base_/default_runtime.py'
+]
+
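+# pre-training keeps the 3072-d hidden layer in the head and applies mixup
+# (alpha=0.2) to every training batch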
+model = dict(
+ head=dict(hidden_dim=3072),
+ train_cfg=dict(
+ augments=dict(type='BatchMixup', alpha=0.2, num_classes=1000,
+ prob=1.)))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/vision_transformer/vit-base-p32_ft-64xb64_in1k-384.py b/openmmlab_test/mmclassification-0.24.1/configs/vision_transformer/vit-base-p32_ft-64xb64_in1k-384.py
new file mode 100644
index 00000000..0386fef1
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/vision_transformer/vit-base-p32_ft-64xb64_in1k-384.py
@@ -0,0 +1,36 @@
+_base_ = [
+ '../_base_/models/vit-base-p32.py',
+ '../_base_/datasets/imagenet_bs64_pil_resize_autoaug.py',
+ '../_base_/schedules/imagenet_bs4096_AdamW.py',
+ '../_base_/default_runtime.py'
+]
+
+model = dict(backbone=dict(img_size=384))
+
+img_norm_cfg = dict(
+ mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True)
+
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='RandomResizedCrop', size=384, backend='pillow'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='Resize', size=(384, -1), backend='pillow'),
+ dict(type='CenterCrop', crop_size=384),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+
+data = dict(
+ train=dict(pipeline=train_pipeline),
+ val=dict(pipeline=test_pipeline),
+ test=dict(pipeline=test_pipeline),
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/vision_transformer/vit-base-p32_pt-64xb64_in1k-224.py b/openmmlab_test/mmclassification-0.24.1/configs/vision_transformer/vit-base-p32_pt-64xb64_in1k-224.py
new file mode 100644
index 00000000..a477e211
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/vision_transformer/vit-base-p32_pt-64xb64_in1k-224.py
@@ -0,0 +1,12 @@
+_base_ = [
+ '../_base_/models/vit-base-p32.py',
+ '../_base_/datasets/imagenet_bs64_pil_resize_autoaug.py',
+ '../_base_/schedules/imagenet_bs4096_AdamW.py',
+ '../_base_/default_runtime.py'
+]
+
+model = dict(
+ head=dict(hidden_dim=3072),
+ train_cfg=dict(
+ augments=dict(type='BatchMixup', alpha=0.2, num_classes=1000,
+ prob=1.)))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/vision_transformer/vit-large-p16_ft-64xb64_in1k-384.py b/openmmlab_test/mmclassification-0.24.1/configs/vision_transformer/vit-large-p16_ft-64xb64_in1k-384.py
new file mode 100644
index 00000000..5be99188
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/vision_transformer/vit-large-p16_ft-64xb64_in1k-384.py
@@ -0,0 +1,36 @@
+_base_ = [
+ '../_base_/models/vit-large-p16.py',
+ '../_base_/datasets/imagenet_bs64_pil_resize_autoaug.py',
+ '../_base_/schedules/imagenet_bs4096_AdamW.py',
+ '../_base_/default_runtime.py'
+]
+
+model = dict(backbone=dict(img_size=384))
+
+img_norm_cfg = dict(
+ mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True)
+
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='RandomResizedCrop', size=384, backend='pillow'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='Resize', size=(384, -1), backend='pillow'),
+ dict(type='CenterCrop', crop_size=384),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+
+data = dict(
+ train=dict(pipeline=train_pipeline),
+ val=dict(pipeline=test_pipeline),
+ test=dict(pipeline=test_pipeline),
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/vision_transformer/vit-large-p16_pt-64xb64_in1k-224.py b/openmmlab_test/mmclassification-0.24.1/configs/vision_transformer/vit-large-p16_pt-64xb64_in1k-224.py
new file mode 100644
index 00000000..5cf7a7d3
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/vision_transformer/vit-large-p16_pt-64xb64_in1k-224.py
@@ -0,0 +1,12 @@
+_base_ = [
+ '../_base_/models/vit-large-p16.py',
+ '../_base_/datasets/imagenet_bs64_pil_resize_autoaug.py',
+ '../_base_/schedules/imagenet_bs4096_AdamW.py',
+ '../_base_/default_runtime.py'
+]
+
+model = dict(
+ head=dict(hidden_dim=3072),
+ train_cfg=dict(
+ augments=dict(type='BatchMixup', alpha=0.2, num_classes=1000,
+ prob=1.)))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/vision_transformer/vit-large-p32_ft-64xb64_in1k-384.py b/openmmlab_test/mmclassification-0.24.1/configs/vision_transformer/vit-large-p32_ft-64xb64_in1k-384.py
new file mode 100644
index 00000000..60506b02
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/vision_transformer/vit-large-p32_ft-64xb64_in1k-384.py
@@ -0,0 +1,37 @@
+# Refer to pytorch-image-models
+_base_ = [
+ '../_base_/models/vit-large-p32.py',
+ '../_base_/datasets/imagenet_bs64_pil_resize_autoaug.py',
+ '../_base_/schedules/imagenet_bs4096_AdamW.py',
+ '../_base_/default_runtime.py'
+]
+
+model = dict(backbone=dict(img_size=384))
+
+img_norm_cfg = dict(
+ mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True)
+
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='RandomResizedCrop', size=384, backend='pillow'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='Resize', size=(384, -1), backend='pillow'),
+ dict(type='CenterCrop', crop_size=384),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+
+data = dict(
+ train=dict(pipeline=train_pipeline),
+ val=dict(pipeline=test_pipeline),
+ test=dict(pipeline=test_pipeline),
+)
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/vision_transformer/vit-large-p32_pt-64xb64_in1k-224.py b/openmmlab_test/mmclassification-0.24.1/configs/vision_transformer/vit-large-p32_pt-64xb64_in1k-224.py
new file mode 100644
index 00000000..773ade87
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/vision_transformer/vit-large-p32_pt-64xb64_in1k-224.py
@@ -0,0 +1,12 @@
+_base_ = [
+ '../_base_/models/vit-large-p32.py',
+ '../_base_/datasets/imagenet_bs64_pil_resize_autoaug.py',
+ '../_base_/schedules/imagenet_bs4096_AdamW.py',
+ '../_base_/default_runtime.py'
+]
+
+model = dict(
+ head=dict(hidden_dim=3072),
+ train_cfg=dict(
+ augments=dict(type='BatchMixup', alpha=0.2, num_classes=1000,
+ prob=1.)))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/wrn/README.md b/openmmlab_test/mmclassification-0.24.1/configs/wrn/README.md
new file mode 100644
index 00000000..b036caaf
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/wrn/README.md
@@ -0,0 +1,35 @@
+# Wide-ResNet
+
+> [Wide Residual Networks](https://arxiv.org/abs/1605.07146)
+
+
+
+## Abstract
+
+Deep residual networks were shown to be able to scale up to thousands of layers and still have improving performance. However, each fraction of a percent of improved accuracy costs nearly doubling the number of layers, and so training very deep residual networks has a problem of diminishing feature reuse, which makes these networks very slow to train. To tackle these problems, in this paper we conduct a detailed experimental study on the architecture of ResNet blocks, based on which we propose a novel architecture where we decrease depth and increase width of residual networks. We call the resulting network structures wide residual networks (WRNs) and show that these are far superior over their commonly used thin and very deep counterparts. For example, we demonstrate that even a simple 16-layer-deep wide residual network outperforms in accuracy and efficiency all previous deep residual networks, including thousand-layer-deep networks, achieving new state-of-the-art results on CIFAR, SVHN, COCO, and significant improvements on ImageNet.
+
+## Results and models
+
+### ImageNet-1k
+
+| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+| :-------------: | :-------: | :------: | :-------: | :-------: | :------------------------------------------------------------------------: | :--------------------------------------------------------------------------: |
+| WRN-50\* | 68.88 | 11.44 | 78.48 | 94.08 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/wrn/wide-resnet50_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/wrn/wide-resnet50_3rdparty_8xb32_in1k_20220304-66678344.pth) |
+| WRN-101\* | 126.89 | 22.81 | 78.84 | 94.28 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/wrn/wide-resnet101_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/wrn/wide-resnet101_3rdparty_8xb32_in1k_20220304-8d5f9d61.pth) |
+| WRN-50 (timm)\* | 68.88 | 11.44 | 81.45 | 95.53 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/wrn/wide-resnet50_timm_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/wrn/wide-resnet50_3rdparty-timm_8xb32_in1k_20220304-83ae4399.pth) |
+
+*Models with * are converted from [TorchVision](https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py) and [TIMM](https://github.com/rwightman/pytorch-image-models/blob/master). The config files of these models are only for inference. We don't guarantee the training accuracy of these config files, and we welcome you to contribute your reproduction results.*
+
+## Citation
+
+```bibtex
+@INPROCEEDINGS{Zagoruyko2016WRN,
+ author = {Sergey Zagoruyko and Nikos Komodakis},
+ title = {Wide Residual Networks},
+ booktitle = {BMVC},
+ year = {2016}}
+```
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/wrn/metafile.yml b/openmmlab_test/mmclassification-0.24.1/configs/wrn/metafile.yml
new file mode 100644
index 00000000..cc37eefd
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/wrn/metafile.yml
@@ -0,0 +1,77 @@
+Collections:
+ - Name: Wide-ResNet
+ Metadata:
+ Training Data: ImageNet-1k
+ Training Techniques:
+ - SGD with Momentum
+ - Weight Decay
+ Training Resources: 8x V100 GPUs
+ Epochs: 100
+ Batch Size: 256
+ Architecture:
+ - 1x1 Convolution
+ - Batch Normalization
+ - Convolution
+ - Global Average Pooling
+ - Max Pooling
+ - ReLU
+ - Residual Connection
+ - Softmax
+ - Wide Residual Block
+ Paper:
+ URL: https://arxiv.org/abs/1605.07146
+ Title: "Wide Residual Networks"
+ README: configs/wrn/README.md
+ Code:
+ URL: https://github.com/open-mmlab/mmclassification/blob/v0.20.1/mmcls/models/backbones/resnet.py#L383
+ Version: v0.20.1
+
+Models:
+ - Name: wide-resnet50_3rdparty_8xb32_in1k
+ Metadata:
+ FLOPs: 11440000000 # 11.44G
+ Parameters: 68880000 # 68.88M
+ In Collection: Wide-ResNet
+ Results:
+ - Task: Image Classification
+ Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 78.48
+ Top 5 Accuracy: 94.08
+ Weights: https://download.openmmlab.com/mmclassification/v0/wrn/wide-resnet50_3rdparty_8xb32_in1k_20220304-66678344.pth
+ Config: configs/wrn/wide-resnet50_8xb32_in1k.py
+ Converted From:
+ Weights: https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth
+ Code: https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py
+ - Name: wide-resnet101_3rdparty_8xb32_in1k
+ Metadata:
+ FLOPs: 22810000000 # 22.81G
+ Parameters: 126890000 # 126.89M
+ In Collection: Wide-ResNet
+ Results:
+ - Task: Image Classification
+ Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 78.84
+ Top 5 Accuracy: 94.28
+ Weights: https://download.openmmlab.com/mmclassification/v0/wrn/wide-resnet101_3rdparty_8xb32_in1k_20220304-8d5f9d61.pth
+ Config: configs/wrn/wide-resnet101_8xb32_in1k.py
+ Converted From:
+ Weights: https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth
+ Code: https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py
+ - Name: wide-resnet50_3rdparty-timm_8xb32_in1k
+ Metadata:
+ FLOPs: 11440000000 # 11.44G
+ Parameters: 68880000 # 68.88M
+ In Collection: Wide-ResNet
+ Results:
+ - Task: Image Classification
+ Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 81.45
+ Top 5 Accuracy: 95.53
+ Weights: https://download.openmmlab.com/mmclassification/v0/wrn/wide-resnet50_3rdparty-timm_8xb32_in1k_20220304-83ae4399.pth
+ Config: configs/wrn/wide-resnet50_timm_8xb32_in1k.py
+ Converted From:
+ Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/wide_resnet50_racm-8234f177.pth
+ Code: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/resnet.py
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/wrn/wide-resnet101_8xb32_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/wrn/wide-resnet101_8xb32_in1k.py
new file mode 100644
index 00000000..d1bf5e5e
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/wrn/wide-resnet101_8xb32_in1k.py
@@ -0,0 +1,7 @@
+_base_ = [
+ '../_base_/models/wide-resnet50.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
+
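+# reuse the Wide-ResNet-50 base model and only change the backbone depth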
+model = dict(backbone=dict(depth=101))
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/wrn/wide-resnet50_8xb32_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/wrn/wide-resnet50_8xb32_in1k.py
new file mode 100644
index 00000000..edf6a051
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/wrn/wide-resnet50_8xb32_in1k.py
@@ -0,0 +1,5 @@
+_base_ = [
+ '../_base_/models/wide-resnet50.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
diff --git a/openmmlab_test/mmclassification-0.24.1/configs/wrn/wide-resnet50_timm_8xb32_in1k.py b/openmmlab_test/mmclassification-0.24.1/configs/wrn/wide-resnet50_timm_8xb32_in1k.py
new file mode 100644
index 00000000..8dca8f37
--- /dev/null
+++ b/openmmlab_test/mmclassification-0.24.1/configs/wrn/wide-resnet50_timm_8xb32_in1k.py
@@ -0,0 +1,5 @@
+_base_ = [
+ '../_base_/models/wide-resnet50.py',
+ '../_base_/datasets/imagenet_bs32_pil_bicubic.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
diff --git a/openmmlab_test/mmclassification-0.24.1/demo/bird.JPEG b/openmmlab_test/mmclassification-0.24.1/demo/bird.JPEG
new file mode 100644
index 0000000000000000000000000000000000000000..9c132a099e87d1c3c1a76dfd9201b03801301eab
GIT binary patch
literal 74237
(binary literal omitted)
z10X?MfH*V-G?1wzPk5jPu20s0z&5CWAb}O-W3jhA&c$h^MI|!>xvSu`AYKrel7Kzp
zDrF6IE=)iikIt@+#s?6sreY094YsixX_>2|D}l5xl!@nhkO?q$`TBh*20IRBDp1@U
zR+;VnKU%fEV4;w9B%bw&HOvswbxf=CsQ_z0PSdgZsB*gmK!SJQn#?u@6%uzF_X4B<
zl|+rdr7g?@CvMdhuBKF5Jj?OR;3o$06JdzR$fDZKFhUq7=t7+hVFr#-s#K45yu(Jb@Rr*hTLP%?fam$0k#yJc%|^Ggl4
zc7RglvdJEm5D5V9A~&uUMy#f-bA86%!%p+wMRxlQxK*XQn3t9|1u~_i!d4aDK^utr
z*0ku~%*Kbxre6r`k8IVa5TQ8+o-G3&?ZoijRY3wZM4*5Pb#x2lx?w+e`(=3
z&AOJ`Bq>@;mh%K^+7d{D3@SL_imG%j%OhK<*Yesi3?;jqvM%l(x3*e?H$=iN?Npsj
z>j1za3uoC#Onx9LRO7rwmiI4KxRzkTSpA#kq#!9uZ(@}KsZoUpB0`D?;Ll^dZqtjk
zid@^+>^(NRb1?qiyu_^8Y+Jg}au9WSx{{QldxVDESd$@9@mAA{dyyM+1jF0SS-@Gl
zZr#Jrpdlzg)#jL7d)B6%N5oh5e6$=vFJr)_hVY`(G!V(P>0tp^t+FDJ_jP(JXJ
zGwmmkL6dirw&;zzn=g(f$H+I;q9l8c(T?k-ni
z*QvcH$MUV^x8B>hwic^OhRJaap-MV92T|B|)l;iOJr_-o!_>)9Owz81=O6o(@Mq%V
zCgPW_+kVf*0F;RUjlPsDj1yO4?zXZM1c*DwdI-YWqAK1A3i%_w
zLCtJf57HtaNcX8Ha0$e~5IL%+xCBQ#!0$B>4@iun^K3!`tw%9U^d#*18>MwzVZ@pIKt;r~(4t1NP83V4pKg^vn>OKQ+jb)rkxOF;0U18DqA8CB0)R-Yd
zdZx7))QgPvW%O%G4>V7l&b$4SeNpM{wq(b$9%VV^=F#O{Slf$zE@L)2B~S+!ZEvYR
z#*!47+*cB=4y+q%U#jQrT-Qxe)2CXTwA%jw56(w|op(k~%kMX5n67Y~D;T}=8(tXs
z4ZCoprWB+%{{SgMiQ{m4il#DzmZDLT)tZR?z9)v_&9d7;{5?fMms=Y;kVzBlNF>K)
zOm?blld(&Vg~HroNw{Ybuv(JReQqH;1p~1l5wd|3?Yf7xR_Wz;MAN!1p?4RI3rAL!
z+Yq^InT=hCawZ`s{UdHOR@AKRnMVCsfE7P5IIPl2Ey)23tTn*AdgkKZ;@0*2
zMPZc!VM{_rg#va7S71bHC$ZY8O(`GZV86;
zQ=(mNWo|0k*B#5at%&k8D0%NblqX{f^@jkN;49!)0*4QDD!9k{uk$+ij9hC_cjaT_
z4>@7jgE`*c6jh?wv2h05&5f7qkO`h;B0n#B^)%wLSo4Zjc4wnsX9rYQZQ`fHo?pgY
zwG3TnTq6#Azp^}+@m5r%k6?AIK<=vMV=oBiTU~$tk5d0cn}iJ}U{_kQJkS_CECqYoH(yKs-eh*B}LH2iy+zSO=$L0zPO6l+5g6C77m(8E-h@lE<%
zu2rD7W){bMbof8Y+R=@8iRpU_Ov+ls1Efd(iLSK{0gamQQj_*GT6{``U7V4I@qe|S
zzy=9EcZeJk`VSVCk2^m|O?0VtGl|b?FW_p=j$xy`jCXzZ*!`c@Yg!T1oXL6<5~i?c
z0R2f7yARX;DKGuC{{Y#2&3X2h3Z1TWJMXu*z!i(BWjOazZvo*<;KC2)Rz8(u{XOD$
z{wv;y{{ZG}H&RLadVB
zd0T(tGUea^{FJ1M*L{TIk